net: remove dead driver names
[dpdk.git] / drivers / net / ring / rte_eth_ring.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include "rte_eth_ring.h"
35 #include <rte_mbuf.h>
36 #include <rte_ethdev.h>
37 #include <rte_malloc.h>
38 #include <rte_memcpy.h>
39 #include <rte_memzone.h>
40 #include <rte_string_fns.h>
41 #include <rte_vdev.h>
42 #include <rte_kvargs.h>
43 #include <rte_errno.h>
44
/* kvarg understood by this driver: "nodeaction=name:node:action" where
 * action is one of the two keywords below (see parse_kvlist()).
 */
#define ETH_RING_NUMA_NODE_ACTION_ARG   "nodeaction"
#define ETH_RING_ACTION_CREATE          "CREATE"
#define ETH_RING_ACTION_ATTACH          "ATTACH"

/* NULL-terminated list of keys accepted by rte_kvargs_parse() */
static const char *valid_arguments[] = {
	ETH_RING_NUMA_NODE_ACTION_ARG,
	NULL
};
53
/* How a device gets its rings: create fresh rings, or attach to
 * already-existing rings looked up by name.
 */
enum dev_action {
	DEV_CREATE,
	DEV_ATTACH
};
58
/* One rx or tx queue: the backing ring plus soft packet counters
 * maintained by the burst functions and reported by eth_stats_get().
 */
struct ring_queue {
	struct rte_ring *rng;      /* backing ring (shared between rx/tx peers) */
	rte_atomic64_t rx_pkts;    /* packets dequeued on this queue */
	rte_atomic64_t tx_pkts;    /* packets enqueued on this queue */
	rte_atomic64_t err_pkts;   /* tx packets that did not fit in the ring */
};
65
/* Per-device private data, pointed to by eth_dev->data->dev_private. */
struct pmd_internals {
	unsigned max_rx_queues;    /* number of valid entries in rx_ring_queues */
	unsigned max_tx_queues;    /* number of valid entries in tx_ring_queues */

	struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
	struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];

	/* single MAC address; data->mac_addrs points here (see
	 * do_eth_dev_ring_create()) */
	struct ether_addr address;
	enum dev_action action;    /* whether rings were created or attached */
};
76
77
/* Template link state copied into each new device: 10G full duplex,
 * initially down (eth_dev_start() flips it up).
 * NOTE(review): link_autoneg is assigned ETH_LINK_SPEED_AUTONEG, a value
 * from the link-speed flag namespace — presumably ETH_LINK_AUTONEG was
 * intended; confirm against this DPDK version's rte_eth_link definition.
 */
static struct rte_eth_link pmd_link = {
		.link_speed = ETH_SPEED_NUM_10G,
		.link_duplex = ETH_LINK_FULL_DUPLEX,
		.link_status = ETH_LINK_DOWN,
		.link_autoneg = ETH_LINK_SPEED_AUTONEG
};
84
85 static uint16_t
86 eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
87 {
88         void **ptrs = (void *)&bufs[0];
89         struct ring_queue *r = q;
90         const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
91                         ptrs, nb_bufs);
92         if (r->rng->flags & RING_F_SC_DEQ)
93                 r->rx_pkts.cnt += nb_rx;
94         else
95                 rte_atomic64_add(&(r->rx_pkts), nb_rx);
96         return nb_rx;
97 }
98
99 static uint16_t
100 eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
101 {
102         void **ptrs = (void *)&bufs[0];
103         struct ring_queue *r = q;
104         const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
105                         ptrs, nb_bufs);
106         if (r->rng->flags & RING_F_SP_ENQ) {
107                 r->tx_pkts.cnt += nb_tx;
108                 r->err_pkts.cnt += nb_bufs - nb_tx;
109         } else {
110                 rte_atomic64_add(&(r->tx_pkts), nb_tx);
111                 rte_atomic64_add(&(r->err_pkts), nb_bufs - nb_tx);
112         }
113         return nb_tx;
114 }
115
116 static int
117 eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }
118
119 static int
120 eth_dev_start(struct rte_eth_dev *dev)
121 {
122         dev->data->dev_link.link_status = ETH_LINK_UP;
123         return 0;
124 }
125
126 static void
127 eth_dev_stop(struct rte_eth_dev *dev)
128 {
129         dev->data->dev_link.link_status = ETH_LINK_DOWN;
130 }
131
132 static int
133 eth_dev_set_link_down(struct rte_eth_dev *dev)
134 {
135         dev->data->dev_link.link_status = ETH_LINK_DOWN;
136         return 0;
137 }
138
139 static int
140 eth_dev_set_link_up(struct rte_eth_dev *dev)
141 {
142         dev->data->dev_link.link_status = ETH_LINK_UP;
143         return 0;
144 }
145
146 static int
147 eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
148                                     uint16_t nb_rx_desc __rte_unused,
149                                     unsigned int socket_id __rte_unused,
150                                     const struct rte_eth_rxconf *rx_conf __rte_unused,
151                                     struct rte_mempool *mb_pool __rte_unused)
152 {
153         struct pmd_internals *internals = dev->data->dev_private;
154         dev->data->rx_queues[rx_queue_id] = &internals->rx_ring_queues[rx_queue_id];
155         return 0;
156 }
157
158 static int
159 eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
160                                     uint16_t nb_tx_desc __rte_unused,
161                                     unsigned int socket_id __rte_unused,
162                                     const struct rte_eth_txconf *tx_conf __rte_unused)
163 {
164         struct pmd_internals *internals = dev->data->dev_private;
165         dev->data->tx_queues[tx_queue_id] = &internals->tx_ring_queues[tx_queue_id];
166         return 0;
167 }
168
169
170 static void
171 eth_dev_info(struct rte_eth_dev *dev,
172                 struct rte_eth_dev_info *dev_info)
173 {
174         struct pmd_internals *internals = dev->data->dev_private;
175         dev_info->max_mac_addrs = 1;
176         dev_info->max_rx_pktlen = (uint32_t)-1;
177         dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
178         dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
179         dev_info->min_rx_bufsize = 0;
180 }
181
182 static void
183 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
184 {
185         unsigned i;
186         unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
187         const struct pmd_internals *internal = dev->data->dev_private;
188
189         for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
190                         i < dev->data->nb_rx_queues; i++) {
191                 stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
192                 rx_total += stats->q_ipackets[i];
193         }
194
195         for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
196                         i < dev->data->nb_tx_queues; i++) {
197                 stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
198                 stats->q_errors[i] = internal->tx_ring_queues[i].err_pkts.cnt;
199                 tx_total += stats->q_opackets[i];
200                 tx_err_total += stats->q_errors[i];
201         }
202
203         stats->ipackets = rx_total;
204         stats->opackets = tx_total;
205         stats->oerrors = tx_err_total;
206 }
207
208 static void
209 eth_stats_reset(struct rte_eth_dev *dev)
210 {
211         unsigned i;
212         struct pmd_internals *internal = dev->data->dev_private;
213         for (i = 0; i < dev->data->nb_rx_queues; i++)
214                 internal->rx_ring_queues[i].rx_pkts.cnt = 0;
215         for (i = 0; i < dev->data->nb_tx_queues; i++) {
216                 internal->tx_ring_queues[i].tx_pkts.cnt = 0;
217                 internal->tx_ring_queues[i].err_pkts.cnt = 0;
218         }
219 }
220
/* MAC address removal is a no-op: the device exposes a single fixed
 * address stored in pmd_internals.
 */
static void
eth_mac_addr_remove(struct rte_eth_dev *dev __rte_unused,
	uint32_t index __rte_unused)
{
}
226
/* MAC address addition is a no-op: the device exposes a single fixed
 * address stored in pmd_internals.
 */
static void
eth_mac_addr_add(struct rte_eth_dev *dev __rte_unused,
	struct ether_addr *mac_addr __rte_unused,
	uint32_t index __rte_unused,
	uint32_t vmdq __rte_unused)
{
}
234
235 static void
236 eth_queue_release(void *q __rte_unused) { ; }
237 static int
238 eth_link_update(struct rte_eth_dev *dev __rte_unused,
239                 int wait_to_complete __rte_unused) { return 0; }
240
/* ethdev callback table wired into every ring-backed device by
 * do_eth_dev_ring_create().
 */
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_set_link_up = eth_dev_set_link_up,
	.dev_set_link_down = eth_dev_set_link_down,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.mac_addr_remove = eth_mac_addr_remove,
	.mac_addr_add = eth_mac_addr_add,
};
258
/*
 * Create an ethdev backed by the given rx/tx rings on the given NUMA
 * socket. The caller must have validated the queue counts against
 * RTE_PMD_RING_MAX_RX_RINGS / RTE_PMD_RING_MAX_TX_RINGS (the queue
 * arrays inside pmd_internals are fixed size).
 *
 * Returns the new port id on success; on failure returns -1 with
 * rte_errno set (ENOMEM on allocation failure, ENOSPC when no ethdev
 * slot is free) and frees everything allocated here.
 */
static int
do_eth_dev_ring_create(const char *name,
		struct rte_ring * const rx_queues[], const unsigned nb_rx_queues,
		struct rte_ring *const tx_queues[], const unsigned nb_tx_queues,
		const unsigned numa_node, enum dev_action action)
{
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	unsigned i;

	RTE_LOG(INFO, PMD, "Creating rings-backed ethdev on numa socket %u\n",
			numa_node);

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	data->rx_queues = rte_zmalloc_socket(name,
			sizeof(void *) * nb_rx_queues, 0, numa_node);
	if (data->rx_queues == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	data->tx_queues = rte_zmalloc_socket(name,
			sizeof(void *) * nb_tx_queues, 0, numa_node);
	if (data->tx_queues == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL) {
		rte_errno = ENOSPC;
		goto error;
	}

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in eth_dev_data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element, of originally allocated eth_dev
	 * so the rings are local per-process */

	internals->action = action;
	internals->max_rx_queues = nb_rx_queues;
	internals->max_tx_queues = nb_tx_queues;
	/* each queue slot points at its pre-sized entry in internals */
	for (i = 0; i < nb_rx_queues; i++) {
		internals->rx_ring_queues[i].rng = rx_queues[i];
		data->rx_queues[i] = &internals->rx_ring_queues[i];
	}
	for (i = 0; i < nb_tx_queues; i++) {
		internals->tx_ring_queues[i].rng = tx_queues[i];
		data->tx_queues[i] = &internals->tx_ring_queues[i];
	}

	data->dev_private = internals;
	data->port_id = eth_dev->data->port_id;
	/* carry the name over from the ethdev-allocated data block into
	 * our replacement one */
	memmove(data->name, eth_dev->data->name, sizeof(data->name));
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->address;

	eth_dev->data = data;
	eth_dev->driver = NULL;
	eth_dev->dev_ops = &ops;
	data->dev_flags = RTE_ETH_DEV_DETACHABLE;
	data->kdrv = RTE_KDRV_NONE;
	data->drv_name = "Rings PMD";
	data->numa_node = numa_node;

	/* finally assign rx and tx ops */
	eth_dev->rx_pkt_burst = eth_ring_rx;
	eth_dev->tx_pkt_burst = eth_ring_tx;

	return data->port_id;

error:
	/* rte_free(NULL) is a no-op, so partial allocation is handled */
	if (data) {
		rte_free(data->rx_queues);
		rte_free(data->tx_queues);
	}
	rte_free(data);
	rte_free(internals);

	return -1;
}
362
363 int
364 rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
365                 const unsigned nb_rx_queues,
366                 struct rte_ring *const tx_queues[],
367                 const unsigned nb_tx_queues,
368                 const unsigned numa_node)
369 {
370         /* do some parameter checking */
371         if (rx_queues == NULL && nb_rx_queues > 0) {
372                 rte_errno = EINVAL;
373                 return -1;
374         }
375         if (tx_queues == NULL && nb_tx_queues > 0) {
376                 rte_errno = EINVAL;
377                 return -1;
378         }
379         if (nb_rx_queues > RTE_PMD_RING_MAX_RX_RINGS) {
380                 rte_errno = EINVAL;
381                 return -1;
382         }
383
384         return do_eth_dev_ring_create(name, rx_queues, nb_rx_queues,
385                         tx_queues, nb_tx_queues, numa_node, DEV_ATTACH);
386 }
387
/*
 * Convenience wrapper: create a single-queue ethdev that uses the same
 * ring for both rx and tx. Placed on the ring's NUMA socket when the
 * ring is memzone-backed, otherwise on any socket.
 */
int
rte_eth_from_ring(struct rte_ring *r)
{
	return rte_eth_from_rings(r->name, &r, 1, &r, 1,
			r->memzone ? r->memzone->socket_id : SOCKET_ID_ANY);
}
394
/*
 * Create (or attach to) a full set of rings named "ETH_RXTX<i>_<name>"
 * and build an ethdev on them, using the same rings for rx and tx.
 * Returns 0 on success, -1 on failure.
 *
 * NOTE(review): if a later iteration or do_eth_dev_ring_create() fails,
 * rings created by earlier iterations are not freed. Freeing them here
 * would change the probe-time attach fallback (which relies on looking
 * those rings up by name), so this is flagged rather than changed —
 * confirm the intended recovery semantics before fixing.
 */
static int
eth_dev_ring_create(const char *name, const unsigned numa_node,
		enum dev_action action)
{
	/* rx and tx are so-called from point of view of first port.
	 * They are inverted from the point of view of second port
	 */
	struct rte_ring *rxtx[RTE_PMD_RING_MAX_RX_RINGS];
	unsigned i;
	char rng_name[RTE_RING_NAMESIZE];
	unsigned num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
			RTE_PMD_RING_MAX_TX_RINGS);

	for (i = 0; i < num_rings; i++) {
		snprintf(rng_name, sizeof(rng_name), "ETH_RXTX%u_%s", i, name);
		/* DEV_CREATE makes new single-producer/single-consumer
		 * rings; DEV_ATTACH looks up rings created by a peer */
		rxtx[i] = (action == DEV_CREATE) ?
				rte_ring_create(rng_name, 1024, numa_node,
						RING_F_SP_ENQ|RING_F_SC_DEQ) :
				rte_ring_lookup(rng_name);
		if (rxtx[i] == NULL)
			return -1;
	}

	if (do_eth_dev_ring_create(name, rxtx, num_rings, rxtx, num_rings,
		numa_node, action) < 0)
		return -1;

	return 0;
}
424
/* One parsed "name:node:action" kvarg entry. */
struct node_action_pair {
	char name[PATH_MAX];     /* device name for this entry */
	unsigned node;           /* NUMA node to create/attach on */
	enum dev_action action;  /* DEV_CREATE or DEV_ATTACH */
};

/* Growable-by-count list of parsed entries; 'list' points at storage
 * allocated immediately after this header (see rte_pmd_ring_probe()).
 */
struct node_action_list {
	unsigned total;          /* capacity: number of kvarg occurrences */
	unsigned count;          /* entries filled in so far */
	struct node_action_pair *list;
};
436
437 static int parse_kvlist (const char *key __rte_unused, const char *value, void *data)
438 {
439         struct node_action_list *info = data;
440         int ret;
441         char *name;
442         char *action;
443         char *node;
444         char *end;
445
446         name = strdup(value);
447
448         ret = -EINVAL;
449
450         if (!name) {
451                 RTE_LOG(WARNING, PMD, "command line paramter is empty for ring pmd!\n");
452                 goto out;
453         }
454
455         node = strchr(name, ':');
456         if (!node) {
457                 RTE_LOG(WARNING, PMD, "could not parse node value from %s", name);
458                 goto out;
459         }
460
461         *node = '\0';
462         node++;
463
464         action = strchr(node, ':');
465         if (!action) {
466                 RTE_LOG(WARNING, PMD, "could not action value from %s", node);
467                 goto out;
468         }
469
470         *action = '\0';
471         action++;
472
473         /*
474          * Need to do some sanity checking here
475          */
476
477         if (strcmp(action, ETH_RING_ACTION_ATTACH) == 0)
478                 info->list[info->count].action = DEV_ATTACH;
479         else if (strcmp(action, ETH_RING_ACTION_CREATE) == 0)
480                 info->list[info->count].action = DEV_CREATE;
481         else
482                 goto out;
483
484         errno = 0;
485         info->list[info->count].node = strtol(node, &end, 10);
486
487         if ((errno != 0) || (*end != '\0')) {
488                 RTE_LOG(WARNING, PMD, "node value %s is unparseable as a number\n", node);
489                 goto out;
490         }
491
492         snprintf(info->list[info->count].name, sizeof(info->list[info->count].name), "%s", name);
493
494         info->count++;
495
496         ret = 0;
497 out:
498         free(name);
499         return ret;
500 }
501
/*
 * vdev probe entry point. Three cases:
 *  - no parameters: create rings on the local socket, falling back to
 *    attaching if creation fails (rings already exist);
 *  - unparseable parameters: same as above, ignoring the parameters;
 *  - "nodeaction" kvargs: create/attach one device per parsed entry.
 * Returns 0 or the last eth_dev_ring_create() result.
 */
static int
rte_pmd_ring_probe(const char *name, const char *params)
{
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;
	struct node_action_list *info = NULL;

	RTE_LOG(INFO, PMD, "Initializing pmd_ring for %s\n", name);

	if (params == NULL || params[0] == '\0') {
		ret = eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE);
		if (ret == -1) {
			/* create failed (rings likely exist already):
			 * try attaching to them instead */
			RTE_LOG(INFO, PMD,
				"Attach to pmd_ring for %s\n", name);
			ret = eth_dev_ring_create(name, rte_socket_id(),
						  DEV_ATTACH);
		}
	}
	else {
		kvlist = rte_kvargs_parse(params, valid_arguments);

		if (!kvlist) {
			RTE_LOG(INFO, PMD, "Ignoring unsupported parameters when creating"
					" rings-backed ethernet device\n");
			ret = eth_dev_ring_create(name, rte_socket_id(),
						  DEV_CREATE);
			if (ret == -1) {
				RTE_LOG(INFO, PMD,
					"Attach to pmd_ring for %s\n",
					name);
				ret = eth_dev_ring_create(name, rte_socket_id(),
							  DEV_ATTACH);
			}
			/* kvlist and info are both NULL here, so returning
			 * directly skips no cleanup */
			return ret;
		} else {
			/* size the list by the number of "nodeaction"
			 * occurrences; entries are stored immediately
			 * after the header in one allocation */
			ret = rte_kvargs_count(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG);
			info = rte_zmalloc("struct node_action_list",
					   sizeof(struct node_action_list) +
					   (sizeof(struct node_action_pair) * ret),
					   0);
			if (!info)
				goto out_free;

			info->total = ret;
			info->list = (struct node_action_pair*)(info + 1);

			ret = rte_kvargs_process(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG,
						 parse_kvlist, info);

			if (ret < 0)
				goto out_free;

			for (info->count = 0; info->count < info->total; info->count++) {
				ret = eth_dev_ring_create(info->list[info->count].name,
							  info->list[info->count].node,
							  info->list[info->count].action);
				/* NOTE(review): the attach fallback uses the
				 * device 'name', not the per-entry
				 * info->list[info->count].name used for the
				 * create attempt — looks inconsistent;
				 * confirm intended behavior */
				if ((ret == -1) &&
				    (info->list[info->count].action == DEV_CREATE)) {
					RTE_LOG(INFO, PMD,
						"Attach to pmd_ring for %s\n",
						name);
					ret = eth_dev_ring_create(name,
							info->list[info->count].node,
							DEV_ATTACH);
				}
			}
		}
	}

out_free:
	rte_kvargs_free(kvlist);
	rte_free(info);
	return ret;
}
576
577 static int
578 rte_pmd_ring_remove(const char *name)
579 {
580         struct rte_eth_dev *eth_dev = NULL;
581         struct pmd_internals *internals = NULL;
582         struct ring_queue *r = NULL;
583         uint16_t i;
584
585         RTE_LOG(INFO, PMD, "Un-Initializing pmd_ring for %s\n", name);
586
587         if (name == NULL)
588                 return -EINVAL;
589
590         /* find an ethdev entry */
591         eth_dev = rte_eth_dev_allocated(name);
592         if (eth_dev == NULL)
593                 return -ENODEV;
594
595         eth_dev_stop(eth_dev);
596
597         internals = eth_dev->data->dev_private;
598         if (internals->action == DEV_CREATE) {
599                 /*
600                  * it is only necessary to delete the rings in rx_queues because
601                  * they are the same used in tx_queues
602                  */
603                 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
604                         r = eth_dev->data->rx_queues[i];
605                         rte_ring_free(r->rng);
606                 }
607         }
608
609         rte_free(eth_dev->data->rx_queues);
610         rte_free(eth_dev->data->tx_queues);
611         rte_free(eth_dev->data->dev_private);
612
613         rte_free(eth_dev->data);
614
615         rte_eth_dev_release_port(eth_dev);
616         return 0;
617 }
618
/* Virtual-device driver hooks registered with the vdev bus. */
static struct rte_vdev_driver pmd_ring_drv = {
	.probe = rte_pmd_ring_probe,
	.remove = rte_pmd_ring_remove,
};

/* Register as "net_ring", keep the legacy "eth_ring" alias, and
 * advertise the accepted device parameter string. */
RTE_PMD_REGISTER_VDEV(net_ring, pmd_ring_drv);
RTE_PMD_REGISTER_ALIAS(net_ring, eth_ring);
RTE_PMD_REGISTER_PARAM_STRING(net_ring,
	ETH_RING_NUMA_NODE_ACTION_ARG "=name:node:action(ATTACH|CREATE)");