net/softnic: add TM capabilities ops
dpdk.git: drivers/net/softnic/rte_eth_softnic.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <rte_ethdev.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_errno.h>
#include <rte_ring.h>
#include <rte_sched.h>
#include <rte_tm_driver.h>

#include "rte_eth_softnic.h"
#include "rte_eth_softnic_internals.h"

/* Resolve the underlying "hard" (physical) device of a soft device. */
#define DEV_HARD(p)                                     \
        (&rte_eth_devices[p->hard.port_id])

#define PMD_PARAM_SOFT_TM               "soft_tm"
#define PMD_PARAM_SOFT_TM_RATE          "soft_tm_rate"
#define PMD_PARAM_SOFT_TM_NB_QUEUES     "soft_tm_nb_queues"
#define PMD_PARAM_SOFT_TM_QSIZE0        "soft_tm_qsize0"
#define PMD_PARAM_SOFT_TM_QSIZE1        "soft_tm_qsize1"
#define PMD_PARAM_SOFT_TM_QSIZE2        "soft_tm_qsize2"
#define PMD_PARAM_SOFT_TM_QSIZE3        "soft_tm_qsize3"
#define PMD_PARAM_SOFT_TM_ENQ_BSZ       "soft_tm_enq_bsz"
#define PMD_PARAM_SOFT_TM_DEQ_BSZ       "soft_tm_deq_bsz"

#define PMD_PARAM_HARD_NAME             "hard_name"
#define PMD_PARAM_HARD_TX_QUEUE_ID      "hard_tx_queue_id"

static const char *pmd_valid_args[] = {
        PMD_PARAM_SOFT_TM,
        PMD_PARAM_SOFT_TM_RATE,
        PMD_PARAM_SOFT_TM_NB_QUEUES,
        PMD_PARAM_SOFT_TM_QSIZE0,
        PMD_PARAM_SOFT_TM_QSIZE1,
        PMD_PARAM_SOFT_TM_QSIZE2,
        PMD_PARAM_SOFT_TM_QSIZE3,
        PMD_PARAM_SOFT_TM_ENQ_BSZ,
        PMD_PARAM_SOFT_TM_DEQ_BSZ,
        PMD_PARAM_HARD_NAME,
        PMD_PARAM_HARD_TX_QUEUE_ID,
        NULL
};

/*
 * Advertise maximal capabilities; the real limits are enforced against the
 * hard device at configure time.
 */
static const struct rte_eth_dev_info pmd_dev_info = {
        .min_rx_bufsize = 0,
        .max_rx_pktlen = UINT32_MAX,
        .max_rx_queues = UINT16_MAX,
        .max_tx_queues = UINT16_MAX,
        .rx_desc_lim = {
                .nb_max = UINT16_MAX,
                .nb_min = 0,
                .nb_align = 1,
        },
        .tx_desc_lim = {
                .nb_max = UINT16_MAX,
                .nb_min = 0,
                .nb_align = 1,
        },
};

static void
pmd_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
        struct rte_eth_dev_info *dev_info)
{
        memcpy(dev_info, &pmd_dev_info, sizeof(*dev_info));
}

static int
pmd_dev_configure(struct rte_eth_dev *dev)
{
        struct pmd_internals *p = dev->data->dev_private;
        struct rte_eth_dev *hard_dev = DEV_HARD(p);

        if (dev->data->nb_rx_queues > hard_dev->data->nb_rx_queues)
                return -1;

        if (p->params.hard.tx_queue_id >= hard_dev->data->nb_tx_queues)
                return -1;

        return 0;
}

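/*
 * In non-intrusive mode, allocate a proxy RX queue that remembers which hard
 * device RX queue to poll; in intrusive mode, share the hard device queue
 * pointer directly.
 */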
static int
pmd_rx_queue_setup(struct rte_eth_dev *dev,
        uint16_t rx_queue_id,
        uint16_t nb_rx_desc __rte_unused,
        unsigned int socket_id,
        const struct rte_eth_rxconf *rx_conf __rte_unused,
        struct rte_mempool *mb_pool __rte_unused)
{
        struct pmd_internals *p = dev->data->dev_private;

        if (p->params.soft.intrusive == 0) {
                struct pmd_rx_queue *rxq;

                rxq = rte_zmalloc_socket(p->params.soft.name,
                        sizeof(struct pmd_rx_queue), 0, socket_id);
                if (rxq == NULL)
                        return -ENOMEM;

                rxq->hard.port_id = p->hard.port_id;
                rxq->hard.rx_queue_id = rx_queue_id;
                dev->data->rx_queues[rx_queue_id] = rxq;
        } else {
                struct rte_eth_dev *hard_dev = DEV_HARD(p);
                void *rxq = hard_dev->data->rx_queues[rx_queue_id];

                if (rxq == NULL)
                        return -1;

                dev->data->rx_queues[rx_queue_id] = rxq;
        }
        return 0;
}

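/*
 * Each soft TX queue is a single-producer, single-consumer rte_ring that
 * buffers packets until they are drained by rte_pmd_softnic_run().
 */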
static int
pmd_tx_queue_setup(struct rte_eth_dev *dev,
        uint16_t tx_queue_id,
        uint16_t nb_tx_desc,
        unsigned int socket_id,
        const struct rte_eth_txconf *tx_conf __rte_unused)
{
        uint32_t size = RTE_ETH_NAME_MAX_LEN + strlen("_txq") + 4;
        char name[size];
        struct rte_ring *r;

        snprintf(name, sizeof(name), "%s_txq%04x",
                dev->data->name, tx_queue_id);
        r = rte_ring_create(name, nb_tx_desc, socket_id,
                RING_F_SP_ENQ | RING_F_SC_DEQ);
        if (r == NULL)
                return -1;

        dev->data->tx_queues[tx_queue_id] = r;
        return 0;
}

static int
pmd_dev_start(struct rte_eth_dev *dev)
{
        struct pmd_internals *p = dev->data->dev_private;

        if (tm_used(dev)) {
                int status = tm_start(p);

                if (status)
                        return status;
        }

        dev->data->dev_link.link_status = ETH_LINK_UP;

        if (p->params.soft.intrusive) {
                struct rte_eth_dev *hard_dev = DEV_HARD(p);

                /* The hard_dev->rx_pkt_burst should be stable by now */
                dev->rx_pkt_burst = hard_dev->rx_pkt_burst;
        }

        return 0;
}

static void
pmd_dev_stop(struct rte_eth_dev *dev)
{
        struct pmd_internals *p = dev->data->dev_private;

        dev->data->dev_link.link_status = ETH_LINK_DOWN;

        if (tm_used(dev))
                tm_stop(p);
}

static void
pmd_dev_close(struct rte_eth_dev *dev)
{
        uint32_t i;

        /* TX queues */
        for (i = 0; i < dev->data->nb_tx_queues; i++)
                rte_ring_free((struct rte_ring *)dev->data->tx_queues[i]);
}

static int
pmd_link_update(struct rte_eth_dev *dev __rte_unused,
        int wait_to_complete __rte_unused)
{
        return 0;
}

static int
pmd_tm_ops_get(struct rte_eth_dev *dev, void *arg)
{
        *(const struct rte_tm_ops **)arg =
                (tm_enabled(dev)) ? &pmd_tm_ops : NULL;

        return 0;
}
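
/*
 * Applications consume these ops through the generic rte_tm API rather than
 * calling them directly. An illustrative sketch (not part of this driver):
 *
 *     struct rte_tm_capabilities cap;
 *     struct rte_tm_error error;
 *     int ret = rte_tm_capabilities_get(port_id, &cap, &error);
 *
 * rte_tm_capabilities_get() internally invokes the tm_ops_get callback above
 * to locate this driver's TM ops.
 */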

static const struct eth_dev_ops pmd_ops = {
        .dev_configure = pmd_dev_configure,
        .dev_start = pmd_dev_start,
        .dev_stop = pmd_dev_stop,
        .dev_close = pmd_dev_close,
        .link_update = pmd_link_update,
        .dev_infos_get = pmd_dev_infos_get,
        .rx_queue_setup = pmd_rx_queue_setup,
        .tx_queue_setup = pmd_tx_queue_setup,
        .tm_ops_get = pmd_tm_ops_get,
};

/* Non-intrusive RX path: poll the corresponding hard device RX queue. */
static uint16_t
pmd_rx_pkt_burst(void *rxq,
        struct rte_mbuf **rx_pkts,
        uint16_t nb_pkts)
{
        struct pmd_rx_queue *rx_queue = rxq;

        return rte_eth_rx_burst(rx_queue->hard.port_id,
                rx_queue->hard.rx_queue_id,
                rx_pkts,
                nb_pkts);
}

/* TX path: stash packets into the soft TX queue ring. */
static uint16_t
pmd_tx_pkt_burst(void *txq,
        struct rte_mbuf **tx_pkts,
        uint16_t nb_pkts)
{
        return (uint16_t)rte_ring_enqueue_burst(txq,
                (void **)tx_pkts,
                nb_pkts,
                NULL);
}

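/*
 * Drain the soft device TX queues in round-robin order and forward the
 * packets to the hard device TX queue. Packets are accumulated until a full
 * burst is available or the flush threshold is reached.
 */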
static __rte_always_inline int
run_default(struct rte_eth_dev *dev)
{
        struct pmd_internals *p = dev->data->dev_private;

        /* Persistent context: Read Only (update not required) */
        struct rte_mbuf **pkts = p->soft.def.pkts;
        uint16_t nb_tx_queues = dev->data->nb_tx_queues;

        /* Persistent context: Read - Write (update required) */
        uint32_t txq_pos = p->soft.def.txq_pos;
        uint32_t pkts_len = p->soft.def.pkts_len;
        uint32_t flush_count = p->soft.def.flush_count;

        /* Not part of the persistent context */
        uint32_t pos;
        uint16_t i;

        /* Soft device TXQ read, Hard device TXQ write */
        for (i = 0; i < nb_tx_queues; i++) {
                struct rte_ring *txq = dev->data->tx_queues[txq_pos];

                /* Read soft device TXQ burst to packet enqueue buffer */
                pkts_len += rte_ring_sc_dequeue_burst(txq,
                        (void **)&pkts[pkts_len],
                        DEFAULT_BURST_SIZE,
                        NULL);

                /* Increment soft device TXQ */
                txq_pos++;
                if (txq_pos >= nb_tx_queues)
                        txq_pos = 0;

                /* Hard device TXQ write when complete burst is available */
                if (pkts_len >= DEFAULT_BURST_SIZE) {
                        for (pos = 0; pos < pkts_len; )
                                pos += rte_eth_tx_burst(p->hard.port_id,
                                        p->params.hard.tx_queue_id,
                                        &pkts[pos],
                                        (uint16_t)(pkts_len - pos));

                        pkts_len = 0;
                        flush_count = 0;
                        break;
                }
        }

        if (flush_count >= FLUSH_COUNT_THRESHOLD) {
                for (pos = 0; pos < pkts_len; )
                        pos += rte_eth_tx_burst(p->hard.port_id,
                                p->params.hard.tx_queue_id,
                                &pkts[pos],
                                (uint16_t)(pkts_len - pos));

                pkts_len = 0;
                flush_count = 0;
        }

        p->soft.def.txq_pos = txq_pos;
        p->soft.def.pkts_len = pkts_len;
        p->soft.def.flush_count = flush_count + 1;

        return 0;
}

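/*
 * Same TX queue draining as run_default(), except that packets pass through
 * the rte_sched traffic manager: bursts are enqueued into the scheduler,
 * then dequeued and transmitted on the hard device TX queue.
 */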
static __rte_always_inline int
run_tm(struct rte_eth_dev *dev)
{
        struct pmd_internals *p = dev->data->dev_private;

        /* Persistent context: Read Only (update not required) */
        struct rte_sched_port *sched = p->soft.tm.sched;
        struct rte_mbuf **pkts_enq = p->soft.tm.pkts_enq;
        struct rte_mbuf **pkts_deq = p->soft.tm.pkts_deq;
        uint32_t enq_bsz = p->params.soft.tm.enq_bsz;
        uint32_t deq_bsz = p->params.soft.tm.deq_bsz;
        uint16_t nb_tx_queues = dev->data->nb_tx_queues;

        /* Persistent context: Read - Write (update required) */
        uint32_t txq_pos = p->soft.tm.txq_pos;
        uint32_t pkts_enq_len = p->soft.tm.pkts_enq_len;
        uint32_t flush_count = p->soft.tm.flush_count;

        /* Not part of the persistent context */
        uint32_t pkts_deq_len, pos;
        uint16_t i;

        /* Soft device TXQ read, TM enqueue */
        for (i = 0; i < nb_tx_queues; i++) {
                struct rte_ring *txq = dev->data->tx_queues[txq_pos];

                /* Read TXQ burst to packet enqueue buffer */
                pkts_enq_len += rte_ring_sc_dequeue_burst(txq,
                        (void **)&pkts_enq[pkts_enq_len],
                        enq_bsz,
                        NULL);

                /* Increment TXQ */
                txq_pos++;
                if (txq_pos >= nb_tx_queues)
                        txq_pos = 0;

                /* TM enqueue when complete burst is available */
                if (pkts_enq_len >= enq_bsz) {
                        rte_sched_port_enqueue(sched, pkts_enq, pkts_enq_len);

                        pkts_enq_len = 0;
                        flush_count = 0;
                        break;
                }
        }

        if (flush_count >= FLUSH_COUNT_THRESHOLD) {
                if (pkts_enq_len)
                        rte_sched_port_enqueue(sched, pkts_enq, pkts_enq_len);

                pkts_enq_len = 0;
                flush_count = 0;
        }

        p->soft.tm.txq_pos = txq_pos;
        p->soft.tm.pkts_enq_len = pkts_enq_len;
        p->soft.tm.flush_count = flush_count + 1;

        /* TM dequeue, Hard device TXQ write */
        pkts_deq_len = rte_sched_port_dequeue(sched, pkts_deq, deq_bsz);

        for (pos = 0; pos < pkts_deq_len; )
                pos += rte_eth_tx_burst(p->hard.port_id,
                        p->params.hard.tx_queue_id,
                        &pkts_deq[pos],
                        (uint16_t)(pkts_deq_len - pos));

        return 0;
}

int
rte_pmd_softnic_run(uint16_t port_id)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
#endif

        return (tm_used(dev)) ? run_tm(dev) : run_default(dev);
}
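
/*
 * A minimal service-loop sketch (hypothetical application code; assumes
 * "soft_port_id" identifies the soft device and "quit" is an exit flag):
 *
 *     while (!quit)
 *         rte_pmd_softnic_run(soft_port_id);
 */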

/* All-zero MAC address shared by every soft device. */
static struct ether_addr eth_addr = { .addr_bytes = {0} };

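/*
 * Map the highest capability bit set in speed_capa (an ETH_LINK_SPEED_*
 * bit mask) to a link speed in Mbps, e.g. ETH_LINK_SPEED_10G (bit 8)
 * yields ETH_SPEED_NUM_10G (10000).
 */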
static uint32_t
eth_dev_speed_max_mbps(uint32_t speed_capa)
{
        uint32_t rate_mbps[32] = {
                ETH_SPEED_NUM_NONE,
                ETH_SPEED_NUM_10M,
                ETH_SPEED_NUM_10M,
                ETH_SPEED_NUM_100M,
                ETH_SPEED_NUM_100M,
                ETH_SPEED_NUM_1G,
                ETH_SPEED_NUM_2_5G,
                ETH_SPEED_NUM_5G,
                ETH_SPEED_NUM_10G,
                ETH_SPEED_NUM_20G,
                ETH_SPEED_NUM_25G,
                ETH_SPEED_NUM_40G,
                ETH_SPEED_NUM_50G,
                ETH_SPEED_NUM_56G,
                ETH_SPEED_NUM_100G,
        };

        uint32_t pos = (speed_capa) ? (31 - __builtin_clz(speed_capa)) : 0;
        return rate_mbps[pos];
}

/* Allocate the packet buffer used by the default (non-TM) run loop. */
static int
default_init(struct pmd_internals *p,
        struct pmd_params *params,
        int numa_node)
{
        p->soft.def.pkts = rte_zmalloc_socket(params->soft.name,
                2 * DEFAULT_BURST_SIZE * sizeof(struct rte_mbuf *),
                0,
                numa_node);

        if (p->soft.def.pkts == NULL)
                return -ENOMEM;

        return 0;
}

static void
default_free(struct pmd_internals *p)
{
        rte_free(p->soft.def.pkts);
}

static void *
pmd_init(struct pmd_params *params, int numa_node)
{
        struct pmd_internals *p;
        int status;

        p = rte_zmalloc_socket(params->soft.name,
                sizeof(struct pmd_internals),
                0,
                numa_node);
        if (p == NULL)
                return NULL;

        memcpy(&p->params, params, sizeof(p->params));
        rte_eth_dev_get_port_by_name(params->hard.name, &p->hard.port_id);

        /* Default */
        status = default_init(p, params, numa_node);
        if (status) {
                free(p->params.hard.name);
                rte_free(p);
                return NULL;
        }

        /* Traffic Management (TM) */
        if (params->soft.flags & PMD_FEATURE_TM) {
                status = tm_init(p, params, numa_node);
                if (status) {
                        default_free(p);
                        free(p->params.hard.name);
                        rte_free(p);
                        return NULL;
                }
        }

        return p;
}

static void
pmd_free(struct pmd_internals *p)
{
        if (p->params.soft.flags & PMD_FEATURE_TM)
                tm_free(p);

        default_free(p);

        free(p->params.hard.name);
        rte_free(p);
}

static int
pmd_ethdev_register(struct rte_vdev_device *vdev,
        struct pmd_params *params,
        void *dev_private)
{
        struct rte_eth_dev_info hard_info;
        struct rte_eth_dev *soft_dev;
        uint32_t hard_speed;
        int numa_node;
        uint16_t hard_port_id;

        rte_eth_dev_get_port_by_name(params->hard.name, &hard_port_id);
        rte_eth_dev_info_get(hard_port_id, &hard_info);
        hard_speed = eth_dev_speed_max_mbps(hard_info.speed_capa);
        numa_node = rte_eth_dev_socket_id(hard_port_id);

        /* Ethdev entry allocation */
        soft_dev = rte_eth_dev_allocate(params->soft.name);
        if (!soft_dev)
                return -ENOMEM;

        /* dev */
        soft_dev->rx_pkt_burst = (params->soft.intrusive) ?
                NULL : /* set up later */
                pmd_rx_pkt_burst;
        soft_dev->tx_pkt_burst = pmd_tx_pkt_burst;
        soft_dev->tx_pkt_prepare = NULL;
        soft_dev->dev_ops = &pmd_ops;
        soft_dev->device = &vdev->device;

        /* dev->data */
        soft_dev->data->dev_private = dev_private;
        soft_dev->data->dev_link.link_speed = hard_speed;
        soft_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
        soft_dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
        soft_dev->data->dev_link.link_status = ETH_LINK_DOWN;
        soft_dev->data->mac_addrs = &eth_addr;
        soft_dev->data->promiscuous = 1;
        soft_dev->data->kdrv = RTE_KDRV_NONE;
        soft_dev->data->numa_node = numa_node;
        soft_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;

        return 0;
}

/* kvargs callback: duplicate the value string into *extra_args. */
static int
get_string(const char *key __rte_unused, const char *value, void *extra_args)
{
        if (!value || !extra_args)
                return -EINVAL;

        *(char **)extra_args = strdup(value);

        if (!*(char **)extra_args)
                return -ENOMEM;

        return 0;
}

/* kvargs callback: parse the value as an unsigned integer. */
static int
get_uint32(const char *key __rte_unused, const char *value, void *extra_args)
{
        if (!value || !extra_args)
                return -EINVAL;

        *(uint32_t *)extra_args = strtoull(value, NULL, 0);

        return 0;
}

static int
pmd_parse_args(struct pmd_params *p, const char *name, const char *params)
{
        struct rte_kvargs *kvlist;
        int i, ret;

        kvlist = rte_kvargs_parse(params, pmd_valid_args);
        if (kvlist == NULL)
                return -EINVAL;

        /* Set default values */
        memset(p, 0, sizeof(*p));
        p->soft.name = name;
        p->soft.intrusive = INTRUSIVE;
        p->soft.tm.rate = 0;
        p->soft.tm.nb_queues = SOFTNIC_SOFT_TM_NB_QUEUES;
        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
                p->soft.tm.qsize[i] = SOFTNIC_SOFT_TM_QUEUE_SIZE;
        p->soft.tm.enq_bsz = SOFTNIC_SOFT_TM_ENQ_BSZ;
        p->soft.tm.deq_bsz = SOFTNIC_SOFT_TM_DEQ_BSZ;
        p->hard.tx_queue_id = SOFTNIC_HARD_TX_QUEUE_ID;

        /* SOFT: TM (optional) */
        if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM) == 1) {
                char *s;

                ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM,
                        &get_string, &s);
                if (ret < 0)
                        goto out_free;

                if (strcmp(s, "on") == 0)
                        p->soft.flags |= PMD_FEATURE_TM;
                else if (strcmp(s, "off") == 0)
                        p->soft.flags &= ~PMD_FEATURE_TM;
                else
                        ret = -EINVAL;

                free(s);
                if (ret)
                        goto out_free;
        }

        /* SOFT: TM rate (measured in bytes/second) (optional) */
        if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_RATE) == 1) {
                ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_RATE,
                        &get_uint32, &p->soft.tm.rate);
                if (ret < 0)
                        goto out_free;

                p->soft.flags |= PMD_FEATURE_TM;
        }

        /* SOFT: TM number of queues (optional) */
        if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_NB_QUEUES) == 1) {
                ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_NB_QUEUES,
                        &get_uint32, &p->soft.tm.nb_queues);
                if (ret < 0)
                        goto out_free;

                p->soft.flags |= PMD_FEATURE_TM;
        }

        /* SOFT: TM queue size 0 .. 3 (optional) */
        if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE0) == 1) {
                uint32_t qsize;

                ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE0,
                        &get_uint32, &qsize);
                if (ret < 0)
                        goto out_free;

                p->soft.tm.qsize[0] = (uint16_t)qsize;
                p->soft.flags |= PMD_FEATURE_TM;
        }

        if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE1) == 1) {
                uint32_t qsize;

                ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE1,
                        &get_uint32, &qsize);
                if (ret < 0)
                        goto out_free;

                p->soft.tm.qsize[1] = (uint16_t)qsize;
                p->soft.flags |= PMD_FEATURE_TM;
        }

        if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE2) == 1) {
                uint32_t qsize;

                ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE2,
                        &get_uint32, &qsize);
                if (ret < 0)
                        goto out_free;

                p->soft.tm.qsize[2] = (uint16_t)qsize;
                p->soft.flags |= PMD_FEATURE_TM;
        }

        if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE3) == 1) {
                uint32_t qsize;

                ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE3,
                        &get_uint32, &qsize);
                if (ret < 0)
                        goto out_free;

                p->soft.tm.qsize[3] = (uint16_t)qsize;
                p->soft.flags |= PMD_FEATURE_TM;
        }

        /* SOFT: TM enqueue burst size (optional) */
        if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_ENQ_BSZ) == 1) {
                ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_ENQ_BSZ,
                        &get_uint32, &p->soft.tm.enq_bsz);
                if (ret < 0)
                        goto out_free;

                p->soft.flags |= PMD_FEATURE_TM;
        }

        /* SOFT: TM dequeue burst size (optional) */
        if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_DEQ_BSZ) == 1) {
                ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_DEQ_BSZ,
                        &get_uint32, &p->soft.tm.deq_bsz);
                if (ret < 0)
                        goto out_free;

                p->soft.flags |= PMD_FEATURE_TM;
        }

        /* HARD: name (mandatory) */
        if (rte_kvargs_count(kvlist, PMD_PARAM_HARD_NAME) == 1) {
                ret = rte_kvargs_process(kvlist, PMD_PARAM_HARD_NAME,
                        &get_string, &p->hard.name);
                if (ret < 0)
                        goto out_free;
        } else {
                ret = -EINVAL;
                goto out_free;
        }

        /* HARD: tx_queue_id (optional) */
        if (rte_kvargs_count(kvlist, PMD_PARAM_HARD_TX_QUEUE_ID) == 1) {
                ret = rte_kvargs_process(kvlist, PMD_PARAM_HARD_TX_QUEUE_ID,
                        &get_uint32, &p->hard.tx_queue_id);
                if (ret < 0)
                        goto out_free;
        }

out_free:
        rte_kvargs_free(kvlist);
        return ret;
}

static int
pmd_probe(struct rte_vdev_device *vdev)
{
        struct pmd_params p;
        const char *params;
        int status;

        struct rte_eth_dev_info hard_info;
        uint32_t hard_speed;
        uint16_t hard_port_id;
        int numa_node;
        void *dev_private;

        RTE_LOG(INFO, PMD,
                "Probing device \"%s\"\n",
                rte_vdev_device_name(vdev));

        /* Parse input arguments */
        params = rte_vdev_device_args(vdev);
        if (!params)
                return -EINVAL;

        status = pmd_parse_args(&p, rte_vdev_device_name(vdev), params);
        if (status)
                return status;

        /* Check input arguments */
        if (rte_eth_dev_get_port_by_name(p.hard.name, &hard_port_id))
                return -EINVAL;

        rte_eth_dev_info_get(hard_port_id, &hard_info);
        hard_speed = eth_dev_speed_max_mbps(hard_info.speed_capa);
        numa_node = rte_eth_dev_socket_id(hard_port_id);

        if (p.hard.tx_queue_id >= hard_info.max_tx_queues)
                return -EINVAL;

        if (p.soft.flags & PMD_FEATURE_TM) {
                status = tm_params_check(&p, hard_speed);

                if (status)
                        return status;
        }

        /* Allocate and initialize soft ethdev private data */
        dev_private = pmd_init(&p, numa_node);
        if (dev_private == NULL)
                return -ENOMEM;

        /* Register soft ethdev */
        RTE_LOG(INFO, PMD,
                "Creating soft ethdev \"%s\" for hard ethdev \"%s\"\n",
                p.soft.name, p.hard.name);

        status = pmd_ethdev_register(vdev, &p, dev_private);
        if (status) {
                pmd_free(dev_private);
                return status;
        }

        return 0;
}

static int
pmd_remove(struct rte_vdev_device *vdev)
{
        struct rte_eth_dev *dev = NULL;
        struct pmd_internals *p;

        if (!vdev)
                return -EINVAL;

        RTE_LOG(INFO, PMD, "Removing device \"%s\"\n",
                rte_vdev_device_name(vdev));

        /* Find the ethdev entry */
        dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
        if (dev == NULL)
                return -ENODEV;
        p = dev->data->dev_private;

        /* Free device data structures */
        pmd_free(p);
        rte_free(dev->data);
        rte_eth_dev_release_port(dev);

        return 0;
}

static struct rte_vdev_driver pmd_softnic_drv = {
        .probe = pmd_probe,
        .remove = pmd_remove,
};

RTE_PMD_REGISTER_VDEV(net_softnic, pmd_softnic_drv);
RTE_PMD_REGISTER_PARAM_STRING(net_softnic,
        PMD_PARAM_SOFT_TM "=on|off "
        PMD_PARAM_SOFT_TM_RATE "=<int> "
        PMD_PARAM_SOFT_TM_NB_QUEUES "=<int> "
        PMD_PARAM_SOFT_TM_QSIZE0 "=<int> "
        PMD_PARAM_SOFT_TM_QSIZE1 "=<int> "
        PMD_PARAM_SOFT_TM_QSIZE2 "=<int> "
        PMD_PARAM_SOFT_TM_QSIZE3 "=<int> "
        PMD_PARAM_SOFT_TM_ENQ_BSZ "=<int> "
        PMD_PARAM_SOFT_TM_DEQ_BSZ "=<int> "
        PMD_PARAM_HARD_NAME "=<string> "
        PMD_PARAM_HARD_TX_QUEUE_ID "=<int>");
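
/*
 * Illustrative devargs (a sketch; the hard device PCI name below is
 * hypothetical):
 *
 *     --vdev 'net_softnic0,hard_name=0000:02:00.0,soft_tm=on,soft_tm_rate=1250000000'
 *
 * soft_tm_rate is expressed in bytes/second (1250000000 bytes/s ~ 10 Gbps).
 */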