drivers/net/softnic/rte_eth_softnic.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <rte_ethdev.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_errno.h>
#include <rte_ring.h>
#include <rte_sched.h>

#include "rte_eth_softnic.h"
#include "rte_eth_softnic_internals.h"

#define DEV_HARD(p)					\
	(&rte_eth_devices[p->hard.port_id])

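/*
 * Device (vdev) arguments accepted by this PMD. The soft_tm_* parameters
 * configure the optional traffic management (TM) layer; hard_name and
 * hard_tx_queue_id identify the underlying "hard" ethdev and the TX queue
 * that the soft device drains into.
 */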
#define PMD_PARAM_SOFT_TM		"soft_tm"
#define PMD_PARAM_SOFT_TM_RATE		"soft_tm_rate"
#define PMD_PARAM_SOFT_TM_NB_QUEUES	"soft_tm_nb_queues"
#define PMD_PARAM_SOFT_TM_QSIZE0	"soft_tm_qsize0"
#define PMD_PARAM_SOFT_TM_QSIZE1	"soft_tm_qsize1"
#define PMD_PARAM_SOFT_TM_QSIZE2	"soft_tm_qsize2"
#define PMD_PARAM_SOFT_TM_QSIZE3	"soft_tm_qsize3"
#define PMD_PARAM_SOFT_TM_ENQ_BSZ	"soft_tm_enq_bsz"
#define PMD_PARAM_SOFT_TM_DEQ_BSZ	"soft_tm_deq_bsz"

#define PMD_PARAM_HARD_NAME		"hard_name"
#define PMD_PARAM_HARD_TX_QUEUE_ID	"hard_tx_queue_id"

static const char *pmd_valid_args[] = {
	PMD_PARAM_SOFT_TM,
	PMD_PARAM_SOFT_TM_RATE,
	PMD_PARAM_SOFT_TM_NB_QUEUES,
	PMD_PARAM_SOFT_TM_QSIZE0,
	PMD_PARAM_SOFT_TM_QSIZE1,
	PMD_PARAM_SOFT_TM_QSIZE2,
	PMD_PARAM_SOFT_TM_QSIZE3,
	PMD_PARAM_SOFT_TM_ENQ_BSZ,
	PMD_PARAM_SOFT_TM_DEQ_BSZ,
	PMD_PARAM_HARD_NAME,
	PMD_PARAM_HARD_TX_QUEUE_ID,
	NULL
};

static const struct rte_eth_dev_info pmd_dev_info = {
	.min_rx_bufsize = 0,
	.max_rx_pktlen = UINT32_MAX,
	.max_rx_queues = UINT16_MAX,
	.max_tx_queues = UINT16_MAX,
	.rx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
	},
	.tx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
	},
};

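/*
 * The soft device advertises deliberately permissive limits; the effective
 * constraints come from the hard device and are enforced at configure time.
 */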
static void
pmd_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
	struct rte_eth_dev_info *dev_info)
{
	memcpy(dev_info, &pmd_dev_info, sizeof(*dev_info));
}

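/*
 * Fail configuration when the soft device requests more RX queues than the
 * hard device provides, or when the configured hard TX queue does not exist.
 */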
static int
pmd_dev_configure(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct rte_eth_dev *hard_dev = DEV_HARD(p);

	if (dev->data->nb_rx_queues > hard_dev->data->nb_rx_queues)
		return -1;

	if (p->params.hard.tx_queue_id >= hard_dev->data->nb_tx_queues)
		return -1;

	return 0;
}

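/*
 * RX queue setup. In non-intrusive mode, allocate a proxy queue that records
 * the hard device port and queue IDs for pmd_rx_pkt_burst(). In intrusive
 * mode, share the hard device's queue object directly so its native RX burst
 * function can be reused.
 */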
static int
pmd_rx_queue_setup(struct rte_eth_dev *dev,
	uint16_t rx_queue_id,
	uint16_t nb_rx_desc __rte_unused,
	unsigned int socket_id,
	const struct rte_eth_rxconf *rx_conf __rte_unused,
	struct rte_mempool *mb_pool __rte_unused)
{
	struct pmd_internals *p = dev->data->dev_private;

	if (p->params.soft.intrusive == 0) {
		struct pmd_rx_queue *rxq;

		rxq = rte_zmalloc_socket(p->params.soft.name,
			sizeof(struct pmd_rx_queue), 0, socket_id);
		if (rxq == NULL)
			return -ENOMEM;

		rxq->hard.port_id = p->hard.port_id;
		rxq->hard.rx_queue_id = rx_queue_id;
		dev->data->rx_queues[rx_queue_id] = rxq;
	} else {
		struct rte_eth_dev *hard_dev = DEV_HARD(p);
		void *rxq = hard_dev->data->rx_queues[rx_queue_id];

		if (rxq == NULL)
			return -1;

		dev->data->rx_queues[rx_queue_id] = rxq;
	}
	return 0;
}

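/*
 * TX queue setup. Each soft TX queue is a single-producer/single-consumer
 * rte_ring; packets sit there until rte_pmd_softnic_run() drains them
 * towards the hard device.
 */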
static int
pmd_tx_queue_setup(struct rte_eth_dev *dev,
	uint16_t tx_queue_id,
	uint16_t nb_tx_desc,
	unsigned int socket_id,
	const struct rte_eth_txconf *tx_conf __rte_unused)
{
	uint32_t size = RTE_ETH_NAME_MAX_LEN + strlen("_txq") + 4;
	char name[size];
	struct rte_ring *r;

	snprintf(name, sizeof(name), "%s_txq%04x",
		dev->data->name, tx_queue_id);
	r = rte_ring_create(name, nb_tx_desc, socket_id,
		RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (r == NULL)
		return -1;

	dev->data->tx_queues[tx_queue_id] = r;
	return 0;
}

static int
pmd_dev_start(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;

	if (tm_used(dev)) {
		int status = tm_start(p);

		if (status)
			return status;
	}

	dev->data->dev_link.link_status = ETH_LINK_UP;

	if (p->params.soft.intrusive) {
		struct rte_eth_dev *hard_dev = DEV_HARD(p);

		/* The hard_dev->rx_pkt_burst should be stable by now */
		dev->rx_pkt_burst = hard_dev->rx_pkt_burst;
	}

	return 0;
}

static void
pmd_dev_stop(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;

	if (tm_used(dev))
		tm_stop(p);
}

static void
pmd_dev_close(struct rte_eth_dev *dev)
{
	uint32_t i;

	/* TX queues */
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		rte_ring_free((struct rte_ring *)dev->data->tx_queues[i]);
}

static int
pmd_link_update(struct rte_eth_dev *dev __rte_unused,
	int wait_to_complete __rte_unused)
{
	return 0;
}

static const struct eth_dev_ops pmd_ops = {
	.dev_configure = pmd_dev_configure,
	.dev_start = pmd_dev_start,
	.dev_stop = pmd_dev_stop,
	.dev_close = pmd_dev_close,
	.link_update = pmd_link_update,
	.dev_infos_get = pmd_dev_infos_get,
	.rx_queue_setup = pmd_rx_queue_setup,
	.tx_queue_setup = pmd_tx_queue_setup,
	.tm_ops_get = NULL,
};

static uint16_t
pmd_rx_pkt_burst(void *rxq,
	struct rte_mbuf **rx_pkts,
	uint16_t nb_pkts)
{
	struct pmd_rx_queue *rx_queue = rxq;

	return rte_eth_rx_burst(rx_queue->hard.port_id,
		rx_queue->hard.rx_queue_id,
		rx_pkts,
		nb_pkts);
}

static uint16_t
pmd_tx_pkt_burst(void *txq,
	struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	return (uint16_t)rte_ring_enqueue_burst(txq,
		(void **)tx_pkts,
		nb_pkts,
		NULL);
}

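/*
 * Default (no TM) datapath run: poll the soft TX rings round-robin,
 * accumulate packets into the enqueue buffer, and write a full burst to the
 * hard device TX queue. A partially filled buffer is flushed once
 * FLUSH_COUNT_THRESHOLD consecutive runs have passed without completing a
 * burst.
 */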
static __rte_always_inline int
run_default(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* Persistent context: Read Only (update not required) */
	struct rte_mbuf **pkts = p->soft.def.pkts;
	uint16_t nb_tx_queues = dev->data->nb_tx_queues;

	/* Persistent context: Read - Write (update required) */
	uint32_t txq_pos = p->soft.def.txq_pos;
	uint32_t pkts_len = p->soft.def.pkts_len;
	uint32_t flush_count = p->soft.def.flush_count;

	/* Not part of the persistent context */
	uint32_t pos;
	uint16_t i;

	/* Soft device TXQ read, Hard device TXQ write */
	for (i = 0; i < nb_tx_queues; i++) {
		struct rte_ring *txq = dev->data->tx_queues[txq_pos];

		/* Read soft device TXQ burst to packet enqueue buffer */
		pkts_len += rte_ring_sc_dequeue_burst(txq,
			(void **)&pkts[pkts_len],
			DEFAULT_BURST_SIZE,
			NULL);

		/* Increment soft device TXQ */
		txq_pos++;
		if (txq_pos >= nb_tx_queues)
			txq_pos = 0;

		/* Hard device TXQ write when complete burst is available */
		if (pkts_len >= DEFAULT_BURST_SIZE) {
			for (pos = 0; pos < pkts_len; )
				pos += rte_eth_tx_burst(p->hard.port_id,
					p->params.hard.tx_queue_id,
					&pkts[pos],
					(uint16_t)(pkts_len - pos));

			pkts_len = 0;
			flush_count = 0;
			break;
		}
	}

	if (flush_count >= FLUSH_COUNT_THRESHOLD) {
		for (pos = 0; pos < pkts_len; )
			pos += rte_eth_tx_burst(p->hard.port_id,
				p->params.hard.tx_queue_id,
				&pkts[pos],
				(uint16_t)(pkts_len - pos));

		pkts_len = 0;
		flush_count = 0;
	}

	p->soft.def.txq_pos = txq_pos;
	p->soft.def.pkts_len = pkts_len;
	p->soft.def.flush_count = flush_count + 1;

	return 0;
}

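/*
 * TM datapath run: drain the soft TX rings into the rte_sched traffic
 * manager, then dequeue scheduled packets and transmit them on the hard
 * device TX queue. Burst sizes are the user-configured enq_bsz/deq_bsz.
 */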
static __rte_always_inline int
run_tm(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* Persistent context: Read Only (update not required) */
	struct rte_sched_port *sched = p->soft.tm.sched;
	struct rte_mbuf **pkts_enq = p->soft.tm.pkts_enq;
	struct rte_mbuf **pkts_deq = p->soft.tm.pkts_deq;
	uint32_t enq_bsz = p->params.soft.tm.enq_bsz;
	uint32_t deq_bsz = p->params.soft.tm.deq_bsz;
	uint16_t nb_tx_queues = dev->data->nb_tx_queues;

	/* Persistent context: Read - Write (update required) */
	uint32_t txq_pos = p->soft.tm.txq_pos;
	uint32_t pkts_enq_len = p->soft.tm.pkts_enq_len;
	uint32_t flush_count = p->soft.tm.flush_count;

	/* Not part of the persistent context */
	uint32_t pkts_deq_len, pos;
	uint16_t i;

	/* Soft device TXQ read, TM enqueue */
	for (i = 0; i < nb_tx_queues; i++) {
		struct rte_ring *txq = dev->data->tx_queues[txq_pos];

		/* Read TXQ burst to packet enqueue buffer */
		pkts_enq_len += rte_ring_sc_dequeue_burst(txq,
			(void **)&pkts_enq[pkts_enq_len],
			enq_bsz,
			NULL);

		/* Increment TXQ */
		txq_pos++;
		if (txq_pos >= nb_tx_queues)
			txq_pos = 0;

		/* TM enqueue when complete burst is available */
		if (pkts_enq_len >= enq_bsz) {
			rte_sched_port_enqueue(sched, pkts_enq, pkts_enq_len);

			pkts_enq_len = 0;
			flush_count = 0;
			break;
		}
	}

	if (flush_count >= FLUSH_COUNT_THRESHOLD) {
		if (pkts_enq_len)
			rte_sched_port_enqueue(sched, pkts_enq, pkts_enq_len);

		pkts_enq_len = 0;
		flush_count = 0;
	}

	p->soft.tm.txq_pos = txq_pos;
	p->soft.tm.pkts_enq_len = pkts_enq_len;
	p->soft.tm.flush_count = flush_count + 1;

	/* TM dequeue, Hard device TXQ write */
	pkts_deq_len = rte_sched_port_dequeue(sched, pkts_deq, deq_bsz);

	for (pos = 0; pos < pkts_deq_len; )
		pos += rte_eth_tx_burst(p->hard.port_id,
			p->params.hard.tx_queue_id,
			&pkts_deq[pos],
			(uint16_t)(pkts_deq_len - pos));

	return 0;
}

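/*
 * Advance the soft device datapath by one iteration. The application is
 * expected to call this repeatedly from its datapath loop, e.g. (an
 * illustrative sketch; soft_port_id and the loop condition are
 * application-specific):
 *
 *	while (!force_quit)
 *		rte_pmd_softnic_run(soft_port_id);
 */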
int
rte_pmd_softnic_run(uint16_t port_id)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
#endif

	return (tm_used(dev)) ? run_tm(dev) : run_default(dev);
}

static struct ether_addr eth_addr = { .addr_bytes = {0} };

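/*
 * Map a speed_capa bitmask to the link rate (in Mbps) of its highest-order
 * capability bit; unrecognized bits map to ETH_SPEED_NUM_NONE via the
 * zero-initialized tail of the table.
 */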
static uint32_t
eth_dev_speed_max_mbps(uint32_t speed_capa)
{
	uint32_t rate_mbps[32] = {
		ETH_SPEED_NUM_NONE,
		ETH_SPEED_NUM_10M,
		ETH_SPEED_NUM_10M,
		ETH_SPEED_NUM_100M,
		ETH_SPEED_NUM_100M,
		ETH_SPEED_NUM_1G,
		ETH_SPEED_NUM_2_5G,
		ETH_SPEED_NUM_5G,
		ETH_SPEED_NUM_10G,
		ETH_SPEED_NUM_20G,
		ETH_SPEED_NUM_25G,
		ETH_SPEED_NUM_40G,
		ETH_SPEED_NUM_50G,
		ETH_SPEED_NUM_56G,
		ETH_SPEED_NUM_100G,
	};

	uint32_t pos = (speed_capa) ? (31 - __builtin_clz(speed_capa)) : 0;
	return rate_mbps[pos];
}

static int
default_init(struct pmd_internals *p,
	struct pmd_params *params,
	int numa_node)
{
	p->soft.def.pkts = rte_zmalloc_socket(params->soft.name,
		2 * DEFAULT_BURST_SIZE * sizeof(struct rte_mbuf *),
		0,
		numa_node);

	if (p->soft.def.pkts == NULL)
		return -ENOMEM;

	return 0;
}

static void
default_free(struct pmd_internals *p)
{
	rte_free(p->soft.def.pkts);
}

static void *
pmd_init(struct pmd_params *params, int numa_node)
{
	struct pmd_internals *p;
	int status;

	p = rte_zmalloc_socket(params->soft.name,
		sizeof(struct pmd_internals),
		0,
		numa_node);
	if (p == NULL)
		return NULL;

	memcpy(&p->params, params, sizeof(p->params));
	rte_eth_dev_get_port_by_name(params->hard.name, &p->hard.port_id);

	/* Default */
	status = default_init(p, params, numa_node);
	if (status) {
		free(p->params.hard.name);
		rte_free(p);
		return NULL;
	}

	/* Traffic Management (TM) */
	if (params->soft.flags & PMD_FEATURE_TM) {
		status = tm_init(p, params, numa_node);
		if (status) {
			default_free(p);
			free(p->params.hard.name);
			rte_free(p);
			return NULL;
		}
	}

	return p;
}

static void
pmd_free(struct pmd_internals *p)
{
	if (p->params.soft.flags & PMD_FEATURE_TM)
		tm_free(p);

	default_free(p);

	free(p->params.hard.name);
	rte_free(p);
}

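/*
 * Allocate the soft ethdev entry and wire it to the hard device: the link
 * speed is derived from the hard device's capabilities and the NUMA node
 * from its socket.
 */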
static int
pmd_ethdev_register(struct rte_vdev_device *vdev,
	struct pmd_params *params,
	void *dev_private)
{
	struct rte_eth_dev_info hard_info;
	struct rte_eth_dev *soft_dev;
	uint32_t hard_speed;
	int numa_node;
	uint16_t hard_port_id;

	rte_eth_dev_get_port_by_name(params->hard.name, &hard_port_id);
	rte_eth_dev_info_get(hard_port_id, &hard_info);
	hard_speed = eth_dev_speed_max_mbps(hard_info.speed_capa);
	numa_node = rte_eth_dev_socket_id(hard_port_id);

	/* Ethdev entry allocation */
	soft_dev = rte_eth_dev_allocate(params->soft.name);
	if (!soft_dev)
		return -ENOMEM;

	/* dev */
	soft_dev->rx_pkt_burst = (params->soft.intrusive) ?
		NULL : /* set up later */
		pmd_rx_pkt_burst;
	soft_dev->tx_pkt_burst = pmd_tx_pkt_burst;
	soft_dev->tx_pkt_prepare = NULL;
	soft_dev->dev_ops = &pmd_ops;
	soft_dev->device = &vdev->device;

	/* dev->data */
	soft_dev->data->dev_private = dev_private;
	soft_dev->data->dev_link.link_speed = hard_speed;
	soft_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	/* ETH_LINK_FIXED (no autoneg); ETH_LINK_SPEED_FIXED is a
	 * speed_capa bit, not a link flag.
	 */
	soft_dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
	soft_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	soft_dev->data->mac_addrs = &eth_addr;
	soft_dev->data->promiscuous = 1;
	soft_dev->data->kdrv = RTE_KDRV_NONE;
	soft_dev->data->numa_node = numa_node;
	soft_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;

	return 0;
}

static int
get_string(const char *key __rte_unused, const char *value, void *extra_args)
{
	if (!value || !extra_args)
		return -EINVAL;

	*(char **)extra_args = strdup(value);

	if (!*(char **)extra_args)
		return -ENOMEM;

	return 0;
}

static int
get_uint32(const char *key __rte_unused, const char *value, void *extra_args)
{
	if (!value || !extra_args)
		return -EINVAL;

	*(uint32_t *)extra_args = strtoull(value, NULL, 0);

	return 0;
}

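/*
 * Parse the vdev argument string into pmd_params and apply defaults. The
 * hard_name parameter is mandatory; all others are optional, and specifying
 * any soft_tm_* parameter other than soft_tm itself implicitly turns the TM
 * feature on.
 */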
static int
pmd_parse_args(struct pmd_params *p, const char *name, const char *params)
{
	struct rte_kvargs *kvlist;
	int i, ret;

	kvlist = rte_kvargs_parse(params, pmd_valid_args);
	if (kvlist == NULL)
		return -EINVAL;

	/* Set default values */
	memset(p, 0, sizeof(*p));
	p->soft.name = name;
	p->soft.intrusive = INTRUSIVE;
	p->soft.tm.rate = 0;
	p->soft.tm.nb_queues = SOFTNIC_SOFT_TM_NB_QUEUES;
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
		p->soft.tm.qsize[i] = SOFTNIC_SOFT_TM_QUEUE_SIZE;
	p->soft.tm.enq_bsz = SOFTNIC_SOFT_TM_ENQ_BSZ;
	p->soft.tm.deq_bsz = SOFTNIC_SOFT_TM_DEQ_BSZ;
	p->hard.tx_queue_id = SOFTNIC_HARD_TX_QUEUE_ID;

	/* SOFT: TM (optional) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM) == 1) {
		char *s;

		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM,
			&get_string, &s);
		if (ret < 0)
			goto out_free;

		if (strcmp(s, "on") == 0)
			p->soft.flags |= PMD_FEATURE_TM;
		else if (strcmp(s, "off") == 0)
			p->soft.flags &= ~PMD_FEATURE_TM;
		else
			ret = -EINVAL;

		free(s);
		if (ret)
			goto out_free;
	}

	/* SOFT: TM rate (measured in bytes/second) (optional) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_RATE) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_RATE,
			&get_uint32, &p->soft.tm.rate);
		if (ret < 0)
			goto out_free;

		p->soft.flags |= PMD_FEATURE_TM;
	}

	/* SOFT: TM number of queues (optional) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_NB_QUEUES) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_NB_QUEUES,
			&get_uint32, &p->soft.tm.nb_queues);
		if (ret < 0)
			goto out_free;

		p->soft.flags |= PMD_FEATURE_TM;
	}

	/* SOFT: TM queue size 0 .. 3 (optional) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE0) == 1) {
		uint32_t qsize;

		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE0,
			&get_uint32, &qsize);
		if (ret < 0)
			goto out_free;

		p->soft.tm.qsize[0] = (uint16_t)qsize;
		p->soft.flags |= PMD_FEATURE_TM;
	}

	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE1) == 1) {
		uint32_t qsize;

		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE1,
			&get_uint32, &qsize);
		if (ret < 0)
			goto out_free;

		p->soft.tm.qsize[1] = (uint16_t)qsize;
		p->soft.flags |= PMD_FEATURE_TM;
	}

	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE2) == 1) {
		uint32_t qsize;

		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE2,
			&get_uint32, &qsize);
		if (ret < 0)
			goto out_free;

		p->soft.tm.qsize[2] = (uint16_t)qsize;
		p->soft.flags |= PMD_FEATURE_TM;
	}

	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE3) == 1) {
		uint32_t qsize;

		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE3,
			&get_uint32, &qsize);
		if (ret < 0)
			goto out_free;

		p->soft.tm.qsize[3] = (uint16_t)qsize;
		p->soft.flags |= PMD_FEATURE_TM;
	}

	/* SOFT: TM enqueue burst size (optional) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_ENQ_BSZ) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_ENQ_BSZ,
			&get_uint32, &p->soft.tm.enq_bsz);
		if (ret < 0)
			goto out_free;

		p->soft.flags |= PMD_FEATURE_TM;
	}

	/* SOFT: TM dequeue burst size (optional) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_DEQ_BSZ) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_DEQ_BSZ,
			&get_uint32, &p->soft.tm.deq_bsz);
		if (ret < 0)
			goto out_free;

		p->soft.flags |= PMD_FEATURE_TM;
	}

	/* HARD: name (mandatory) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_HARD_NAME) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_HARD_NAME,
			&get_string, &p->hard.name);
		if (ret < 0)
			goto out_free;
	} else {
		ret = -EINVAL;
		goto out_free;
	}

	/* HARD: tx_queue_id (optional) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_HARD_TX_QUEUE_ID) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_HARD_TX_QUEUE_ID,
			&get_uint32, &p->hard.tx_queue_id);
		if (ret < 0)
			goto out_free;
	}

out_free:
	rte_kvargs_free(kvlist);
	return ret;
}

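/*
 * Probe: parse and validate the vdev arguments, allocate the private data
 * on the hard device's NUMA node, then register the soft ethdev.
 */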
static int
pmd_probe(struct rte_vdev_device *vdev)
{
	struct pmd_params p;
	const char *params;
	int status;

	struct rte_eth_dev_info hard_info;
	uint32_t hard_speed;
	uint16_t hard_port_id;
	int numa_node;
	void *dev_private;

	RTE_LOG(INFO, PMD,
		"Probing device \"%s\"\n",
		rte_vdev_device_name(vdev));

	/* Parse input arguments */
	params = rte_vdev_device_args(vdev);
	if (!params)
		return -EINVAL;

	status = pmd_parse_args(&p, rte_vdev_device_name(vdev), params);
	if (status)
		return status;

	/* Check input arguments */
	if (rte_eth_dev_get_port_by_name(p.hard.name, &hard_port_id))
		return -EINVAL;

	rte_eth_dev_info_get(hard_port_id, &hard_info);
	hard_speed = eth_dev_speed_max_mbps(hard_info.speed_capa);
	numa_node = rte_eth_dev_socket_id(hard_port_id);

	if (p.hard.tx_queue_id >= hard_info.max_tx_queues)
		return -EINVAL;

	if (p.soft.flags & PMD_FEATURE_TM) {
		status = tm_params_check(&p, hard_speed);

		if (status)
			return status;
	}

	/* Allocate and initialize soft ethdev private data */
	dev_private = pmd_init(&p, numa_node);
	if (dev_private == NULL)
		return -ENOMEM;

	/* Register soft ethdev */
	RTE_LOG(INFO, PMD,
		"Creating soft ethdev \"%s\" for hard ethdev \"%s\"\n",
		p.soft.name, p.hard.name);

	status = pmd_ethdev_register(vdev, &p, dev_private);
	if (status) {
		pmd_free(dev_private);
		return status;
	}

	return 0;
}

static int
pmd_remove(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *dev = NULL;
	struct pmd_internals *p;

	if (!vdev)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Removing device \"%s\"\n",
		rte_vdev_device_name(vdev));

	/* Find the ethdev entry */
	dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
	if (dev == NULL)
		return -ENODEV;
	p = dev->data->dev_private;

	/* Free device data structures */
	pmd_free(p);
	rte_free(dev->data);
	rte_eth_dev_release_port(dev);

	return 0;
}

static struct rte_vdev_driver pmd_softnic_drv = {
	.probe = pmd_probe,
	.remove = pmd_remove,
};

RTE_PMD_REGISTER_VDEV(net_softnic, pmd_softnic_drv);
RTE_PMD_REGISTER_PARAM_STRING(net_softnic,
	PMD_PARAM_SOFT_TM "=on|off "
	PMD_PARAM_SOFT_TM_RATE "=<int> "
	PMD_PARAM_SOFT_TM_NB_QUEUES "=<int> "
	PMD_PARAM_SOFT_TM_QSIZE0 "=<int> "
	PMD_PARAM_SOFT_TM_QSIZE1 "=<int> "
	PMD_PARAM_SOFT_TM_QSIZE2 "=<int> "
	PMD_PARAM_SOFT_TM_QSIZE3 "=<int> "
	PMD_PARAM_SOFT_TM_ENQ_BSZ "=<int> "
	PMD_PARAM_SOFT_TM_DEQ_BSZ "=<int> "
	PMD_PARAM_HARD_NAME "=<string> "
	PMD_PARAM_HARD_TX_QUEUE_ID "=<int>");
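
/*
 * Example vdev string (illustrative only; the hard device name and queue ID
 * below are hypothetical):
 *
 *	--vdev 'net_softnic0,hard_name=0000:02:00.0,soft_tm=on,hard_tx_queue_id=0'
 */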