ring: fix return of new port id on creation
[dpdk.git] / drivers/net/ring/rte_eth_ring.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "rte_eth_ring.h"
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_kvargs.h>

#define ETH_RING_NUMA_NODE_ACTION_ARG   "nodeaction"
#define ETH_RING_ACTION_CREATE          "CREATE"
#define ETH_RING_ACTION_ATTACH          "ATTACH"

static const char *ring_ethdev_driver_name = "Ring PMD";

static const char *valid_arguments[] = {
        ETH_RING_NUMA_NODE_ACTION_ARG,
        NULL
};

struct ring_queue {
        struct rte_ring *rng;
        rte_atomic64_t rx_pkts;
        rte_atomic64_t tx_pkts;
        rte_atomic64_t err_pkts;
};

struct pmd_internals {
        unsigned nb_rx_queues;
        unsigned nb_tx_queues;

        struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
        struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];

        struct ether_addr address;
};


static const char *drivername = "Rings PMD";
static struct rte_eth_link pmd_link = {
                .link_speed = 10000,
                .link_duplex = ETH_LINK_FULL_DUPLEX,
                .link_status = 0
};

static uint16_t
eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        void **ptrs = (void *)&bufs[0];
        struct ring_queue *r = q;
        const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
                        ptrs, nb_bufs);
        if (r->rng->flags & RING_F_SC_DEQ)
                r->rx_pkts.cnt += nb_rx;
        else
                rte_atomic64_add(&(r->rx_pkts), nb_rx);
        return nb_rx;
}

static uint16_t
eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        void **ptrs = (void *)&bufs[0];
        struct ring_queue *r = q;
        const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
                        ptrs, nb_bufs);
        if (r->rng->flags & RING_F_SP_ENQ) {
                r->tx_pkts.cnt += nb_tx;
                r->err_pkts.cnt += nb_bufs - nb_tx;
        } else {
                rte_atomic64_add(&(r->tx_pkts), nb_tx);
                rte_atomic64_add(&(r->err_pkts), nb_bufs - nb_tx);
        }
        return nb_tx;
}
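
/*
 * Illustrative sketch, not part of the driver: a port created by
 * eth_dev_ring_create() below uses the same rte_ring as both its RX and TX
 * queue, so anything an application transmits through the normal ethdev
 * burst API comes straight back on receive. "port" and the mempool "mp" are
 * assumed to have been set up by the application.
 *
 *      struct rte_mbuf *m = rte_pktmbuf_alloc(mp);
 *      struct rte_mbuf *rx_bufs[1];
 *
 *      if (m != NULL && rte_eth_tx_burst(port, 0, &m, 1) == 1 &&
 *                      rte_eth_rx_burst(port, 0, rx_bufs, 1) == 1)
 *              rte_pktmbuf_free(rx_bufs[0]);
 */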

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }

static int
eth_dev_start(struct rte_eth_dev *dev)
{
        dev->data->dev_link.link_status = 1;
        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        dev->data->dev_link.link_status = 0;
}

static int
eth_dev_set_link_down(struct rte_eth_dev *dev)
{
        dev->data->dev_link.link_status = 0;
        return 0;
}

static int
eth_dev_set_link_up(struct rte_eth_dev *dev)
{
        dev->data->dev_link.link_status = 1;
        return 0;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                                    uint16_t nb_rx_desc __rte_unused,
                                    unsigned int socket_id __rte_unused,
                                    const struct rte_eth_rxconf *rx_conf __rte_unused,
                                    struct rte_mempool *mb_pool __rte_unused)
{
        struct pmd_internals *internals = dev->data->dev_private;
        dev->data->rx_queues[rx_queue_id] = &internals->rx_ring_queues[rx_queue_id];
        return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                                    uint16_t nb_tx_desc __rte_unused,
                                    unsigned int socket_id __rte_unused,
                                    const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct pmd_internals *internals = dev->data->dev_private;
        dev->data->tx_queues[tx_queue_id] = &internals->tx_ring_queues[tx_queue_id];
        return 0;
}
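
/*
 * Illustrative sketch, not part of the driver: the configure, start and
 * queue setup callbacks above are reached through the usual ethdev bring-up
 * an application performs. "port", "mp" and the single-queue configuration
 * are assumptions for the example only.
 *
 *      static const struct rte_eth_conf null_conf;
 *
 *      if (rte_eth_dev_configure(port, 1, 1, &null_conf) < 0 ||
 *                      rte_eth_rx_queue_setup(port, 0, 128, rte_socket_id(),
 *                                      NULL, mp) < 0 ||
 *                      rte_eth_tx_queue_setup(port, 0, 128, rte_socket_id(),
 *                                      NULL) < 0 ||
 *                      rte_eth_dev_start(port) < 0)
 *              rte_exit(EXIT_FAILURE, "ring port setup failed\n");
 */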


static void
eth_dev_info(struct rte_eth_dev *dev,
                struct rte_eth_dev_info *dev_info)
{
        struct pmd_internals *internals = dev->data->dev_private;
        dev_info->driver_name = drivername;
        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = (uint16_t)internals->nb_rx_queues;
        dev_info->max_tx_queues = (uint16_t)internals->nb_tx_queues;
        dev_info->min_rx_bufsize = 0;
        dev_info->pci_dev = NULL;
}

static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
        unsigned i;
        unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
        const struct pmd_internals *internal = dev->data->dev_private;

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < internal->nb_rx_queues; i++) {
                igb_stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
                rx_total += igb_stats->q_ipackets[i];
        }

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < internal->nb_tx_queues; i++) {
                igb_stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
                igb_stats->q_errors[i] = internal->tx_ring_queues[i].err_pkts.cnt;
                tx_total += igb_stats->q_opackets[i];
                tx_err_total += igb_stats->q_errors[i];
        }

        igb_stats->ipackets = rx_total;
        igb_stats->opackets = tx_total;
        igb_stats->oerrors = tx_err_total;
}
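
/*
 * Illustrative sketch, not part of the driver: the per-queue counters
 * aggregated above surface through the standard stats call; "port" is
 * assumed to be a valid ring port id.
 *
 *      struct rte_eth_stats stats;
 *
 *      rte_eth_stats_get(port, &stats);
 *      printf("rx %"PRIu64" tx %"PRIu64" tx errors %"PRIu64"\n",
 *                      stats.ipackets, stats.opackets, stats.oerrors);
 */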

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
        unsigned i;
        struct pmd_internals *internal = dev->data->dev_private;
        for (i = 0; i < internal->nb_rx_queues; i++)
                internal->rx_ring_queues[i].rx_pkts.cnt = 0;
        for (i = 0; i < internal->nb_tx_queues; i++) {
                internal->tx_ring_queues[i].tx_pkts.cnt = 0;
                internal->tx_ring_queues[i].err_pkts.cnt = 0;
        }
}

static void
eth_mac_addr_remove(struct rte_eth_dev *dev __rte_unused,
        uint32_t index __rte_unused)
{
}

static void
eth_mac_addr_add(struct rte_eth_dev *dev __rte_unused,
        struct ether_addr *mac_addr __rte_unused,
        uint32_t index __rte_unused,
        uint32_t vmdq __rte_unused)
{
}

static void
eth_queue_release(void *q __rte_unused) { ; }
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused) { return 0; }

static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_set_link_up = eth_dev_set_link_up,
        .dev_set_link_down = eth_dev_set_link_down,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .link_update = eth_link_update,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
        .mac_addr_remove = eth_mac_addr_remove,
        .mac_addr_add = eth_mac_addr_add,
};

int
rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
                const unsigned nb_rx_queues,
                struct rte_ring *const tx_queues[],
                const unsigned nb_tx_queues,
                const unsigned numa_node)
{
        struct rte_eth_dev_data *data = NULL;
        struct rte_pci_device *pci_dev = NULL;
        struct pmd_internals *internals = NULL;
        struct rte_eth_dev *eth_dev = NULL;
        struct eth_driver *eth_drv = NULL;
        struct rte_pci_id *id_table = NULL;

        unsigned i;

        /* do some parameter checking */
        if (rx_queues == NULL && nb_rx_queues > 0)
                goto error;
        if (tx_queues == NULL && nb_tx_queues > 0)
                goto error;

        RTE_LOG(INFO, PMD, "Creating rings-backed ethdev on numa socket %u\n",
                        numa_node);

        /* now do all data allocation - for eth_dev structure, dummy pci driver
         * and internal (private) data
         */
        data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
        if (data == NULL)
                goto error;

        pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, numa_node);
        if (pci_dev == NULL)
                goto error;

        id_table = rte_zmalloc_socket(name, sizeof(*id_table), 0, numa_node);
        if (id_table == NULL)
                goto error;

        internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
        if (internals == NULL)
                goto error;

        /* reserve an ethdev entry */
        eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
        if (eth_dev == NULL)
                goto error;

        eth_drv = rte_zmalloc_socket(name, sizeof(*eth_drv), 0, numa_node);
        if (eth_drv == NULL)
                goto error;

        /* now put it all together:
         * - store queue data in internals
         * - store numa_node info in the dummy pci device
         * - point eth_dev_data to internals and the pci device
         * - and point the eth_dev structure to the new eth_dev_data structure
         */
        /* NOTE: we'll replace the data element of the originally allocated
         * eth_dev so the rings are local per-process */

        internals->nb_rx_queues = nb_rx_queues;
        internals->nb_tx_queues = nb_tx_queues;
        for (i = 0; i < nb_rx_queues; i++) {
                internals->rx_ring_queues[i].rng = rx_queues[i];
        }
        for (i = 0; i < nb_tx_queues; i++) {
                internals->tx_ring_queues[i].rng = tx_queues[i];
        }

        eth_drv->pci_drv.name = ring_ethdev_driver_name;
        eth_drv->pci_drv.id_table = id_table;

        pci_dev->numa_node = numa_node;
        pci_dev->driver = &eth_drv->pci_drv;

        data->dev_private = internals;
        data->port_id = eth_dev->data->port_id;
        data->nb_rx_queues = (uint16_t)nb_rx_queues;
        data->nb_tx_queues = (uint16_t)nb_tx_queues;
        data->dev_link = pmd_link;
        data->mac_addrs = &internals->address;

        eth_dev->data = data;
        eth_dev->driver = eth_drv;
        eth_dev->dev_ops = &ops;
        eth_dev->pci_dev = pci_dev;
        TAILQ_INIT(&(eth_dev->link_intr_cbs));

        /* finally assign rx and tx ops */
        eth_dev->rx_pkt_burst = eth_ring_rx;
        eth_dev->tx_pkt_burst = eth_ring_tx;

        return data->port_id;

error:
        /* rte_free() ignores NULL, so everything allocated above can be
         * freed unconditionally */
        rte_free(data);
        rte_free(pci_dev);
        rte_free(id_table);
        rte_free(internals);
        rte_free(eth_drv);

        return -1;
}
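
/*
 * Illustrative sketch, not part of the driver: per the subject of this
 * change, rte_eth_from_rings() returns the port id of the newly created
 * ethdev (data->port_id) on success and -1 on error, so a caller can use the
 * id directly. The ring and device names below are made up for the example.
 *
 *      struct rte_ring *r = rte_ring_create("R0", 256, rte_socket_id(),
 *                      RING_F_SP_ENQ | RING_F_SC_DEQ);
 *      int port;
 *
 *      if (r == NULL)
 *              rte_exit(EXIT_FAILURE, "cannot create ring\n");
 *      port = rte_eth_from_rings("ring_eth0", &r, 1, &r, 1, rte_socket_id());
 *      if (port < 0)
 *              rte_exit(EXIT_FAILURE, "cannot create ring-backed port\n");
 */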

enum dev_action {
        DEV_CREATE,
        DEV_ATTACH
};

static int
eth_dev_ring_create(const char *name, const unsigned numa_node,
                enum dev_action action)
{
        /* rx and tx are so-called from the point of view of the first port;
         * they are inverted from the point of view of the second port.
         */
        struct rte_ring *rxtx[RTE_PMD_RING_MAX_RX_RINGS];
        unsigned i;
        char rng_name[RTE_RING_NAMESIZE];
        unsigned num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
                        RTE_PMD_RING_MAX_TX_RINGS);

        for (i = 0; i < num_rings; i++) {
                snprintf(rng_name, sizeof(rng_name), "ETH_RXTX%u_%s", i, name);
                rxtx[i] = (action == DEV_CREATE) ?
                                rte_ring_create(rng_name, 1024, numa_node,
                                                RING_F_SP_ENQ|RING_F_SC_DEQ) :
                                rte_ring_lookup(rng_name);
                if (rxtx[i] == NULL)
                        return -1;
        }

        if (rte_eth_from_rings(name, rxtx, num_rings, rxtx, num_rings, numa_node) < 0)
                return -1;

        return 0;
}


static int
eth_dev_ring_pair_create(const char *name, const unsigned numa_node,
                enum dev_action action)
{
        /* rx and tx are so-called from the point of view of the first port;
         * they are inverted from the point of view of the second port.
         */
        struct rte_ring *rx[RTE_PMD_RING_MAX_RX_RINGS];
        struct rte_ring *tx[RTE_PMD_RING_MAX_TX_RINGS];
        unsigned i;
        char rx_rng_name[RTE_RING_NAMESIZE];
        char tx_rng_name[RTE_RING_NAMESIZE];
        unsigned num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
                        RTE_PMD_RING_MAX_TX_RINGS);

        for (i = 0; i < num_rings; i++) {
                snprintf(rx_rng_name, sizeof(rx_rng_name), "ETH_RX%u_%s", i, name);
                rx[i] = (action == DEV_CREATE) ?
                                rte_ring_create(rx_rng_name, 1024, numa_node,
                                                RING_F_SP_ENQ|RING_F_SC_DEQ) :
                                rte_ring_lookup(rx_rng_name);
                if (rx[i] == NULL)
                        return -1;
                snprintf(tx_rng_name, sizeof(tx_rng_name), "ETH_TX%u_%s", i, name);
                tx[i] = (action == DEV_CREATE) ?
                                rte_ring_create(tx_rng_name, 1024, numa_node,
                                                RING_F_SP_ENQ|RING_F_SC_DEQ) :
                                rte_ring_lookup(tx_rng_name);
                if (tx[i] == NULL)
                        return -1;
        }

        if (rte_eth_from_rings(rx_rng_name, rx, num_rings, tx, num_rings,
                                numa_node) < 0 ||
                        rte_eth_from_rings(tx_rng_name, tx, num_rings, rx,
                                num_rings, numa_node) < 0)
                return -1;

        return 0;
}
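
/*
 * Illustrative sketch, not part of the driver: the two ports set up above
 * are wired back to back, so mbufs transmitted on one port id are received
 * on the other. "port_a", "port_b" and "m" stand for the two allocated port
 * ids and an application-owned mbuf.
 *
 *      struct rte_mbuf *rx_bufs[1];
 *
 *      if (rte_eth_tx_burst(port_a, 0, &m, 1) == 1 &&
 *                      rte_eth_rx_burst(port_b, 0, rx_bufs, 1) == 1)
 *              rte_pktmbuf_free(rx_bufs[0]);
 */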

int
rte_eth_ring_pair_create(const char *name, const unsigned numa_node)
{
        RTE_LOG(WARNING, PMD, "rte_eth_ring_pair_create is deprecated\n");
        return eth_dev_ring_pair_create(name, numa_node, DEV_CREATE);
}

int
rte_eth_ring_pair_attach(const char *name, const unsigned numa_node)
{
        RTE_LOG(WARNING, PMD, "rte_eth_ring_pair_attach is deprecated\n");
        return eth_dev_ring_pair_create(name, numa_node, DEV_ATTACH);
}

struct node_action_pair {
        char name[PATH_MAX];
        unsigned node;
        enum dev_action action;
};

struct node_action_list {
        unsigned total;
        unsigned count;
        struct node_action_pair *list;
};

static int
parse_kvlist(const char *key __rte_unused, const char *value, void *data)
{
        struct node_action_list *info = data;
        int ret;
        char *name;
        char *action;
        char *node;
        char *end;

        name = strdup(value);

        ret = -EINVAL;

        if (!name) {
                RTE_LOG(WARNING, PMD, "command line parameter is empty for ring pmd!\n");
                goto out;
        }

        node = strchr(name, ':');
        if (!node) {
                RTE_LOG(WARNING, PMD, "could not parse node value from %s\n", name);
                goto out;
        }

        *node = '\0';
        node++;

        action = strchr(node, ':');
        if (!action) {
                RTE_LOG(WARNING, PMD, "could not parse action value from %s\n", node);
                goto out;
        }

        *action = '\0';
        action++;

        /*
         * Need to do some sanity checking here
         */

        if (strcmp(action, ETH_RING_ACTION_ATTACH) == 0)
                info->list[info->count].action = DEV_ATTACH;
        else if (strcmp(action, ETH_RING_ACTION_CREATE) == 0)
                info->list[info->count].action = DEV_CREATE;
        else
                goto out;

        errno = 0;
        info->list[info->count].node = strtol(node, &end, 10);

        if ((errno != 0) || (*end != '\0')) {
                RTE_LOG(WARNING, PMD, "node value %s is unparseable as a number\n", node);
                goto out;
        }

        snprintf(info->list[info->count].name, sizeof(info->list[info->count].name), "%s", name);

        info->count++;

        ret = 0;
out:
        free(name);
        return ret;
}
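
/*
 * Illustrative example: each value handed to parse_kvlist() is expected to
 * look like "<name>:<numa node>:<CREATE|ATTACH>", so a devargs string such
 * as (names assumed)
 *
 *      nodeaction=ring0:0:CREATE
 *
 * is parsed into name "ring0", node 0 and action DEV_CREATE.
 */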

static int
rte_pmd_ring_devinit(const char *name, const char *params)
{
        struct rte_kvargs *kvlist = NULL;
        int ret = 0;
        struct node_action_list *info = NULL;

        RTE_LOG(INFO, PMD, "Initializing pmd_ring for %s\n", name);

        if (params == NULL || params[0] == '\0')
                eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE);
        else {
                kvlist = rte_kvargs_parse(params, valid_arguments);

                if (!kvlist) {
                        RTE_LOG(INFO, PMD, "Ignoring unsupported parameters when creating"
                                        " rings-backed ethernet device\n");
                        eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE);
                        return 0;
                } else {
                        ret = rte_kvargs_count(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG);
                        info = rte_zmalloc("struct node_action_list", sizeof(struct node_action_list) +
                                           (sizeof(struct node_action_pair) * ret), 0);
                        if (!info)
                                goto out_free;

                        info->total = ret;
                        info->list = (struct node_action_pair *)(info + 1);

                        ret = rte_kvargs_process(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG,
                                                 parse_kvlist, info);

                        if (ret < 0)
                                goto out_free;

                        for (info->count = 0; info->count < info->total; info->count++) {
                                eth_dev_ring_create(name, info->list[info->count].node,
                                                    info->list[info->count].action);
                        }
                }
        }

out_free:
        rte_kvargs_free(kvlist);
        rte_free(info);
        return ret;
}
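
/*
 * Illustrative example: this init function runs when the PMD is instantiated
 * as a virtual device from the EAL command line, for instance (device names
 * assumed)
 *
 *      --vdev=eth_ring0
 *      --vdev=eth_ring1,nodeaction=ring1:0:CREATE
 *
 * The first form creates a port on the caller's NUMA socket with default
 * rings; the second goes through the nodeaction parsing above.
 */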

static struct rte_driver pmd_ring_drv = {
        .name = "eth_ring",
        .type = PMD_VDEV,
        .init = rte_pmd_ring_devinit,
};

PMD_REGISTER_DRIVER(pmd_ring_drv);