lib: provide initial versioning
[dpdk.git] / lib / librte_pmd_ring / rte_eth_ring.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include "rte_eth_ring.h"
35 #include <rte_mbuf.h>
36 #include <rte_ethdev.h>
37 #include <rte_malloc.h>
38 #include <rte_memcpy.h>
39 #include <rte_string_fns.h>
40 #include <rte_dev.h>
41 #include <rte_kvargs.h>
42
/* devargs key: value has the form "name:numa_node:action" */
#define ETH_RING_NUMA_NODE_ACTION_ARG   "nodeaction"
/* action value: create new backing rte_rings */
#define ETH_RING_ACTION_CREATE          "CREATE"
/* action value: attach to (look up) existing rte_rings by name */
#define ETH_RING_ACTION_ATTACH          "ATTACH"

/* NULL-terminated list of devargs keys this driver accepts */
static const char *valid_arguments[] = {
	ETH_RING_NUMA_NODE_ACTION_ARG,
	NULL
};
51
/* One RX or TX queue: the backing ring plus soft stats counters. */
struct ring_queue {
	struct rte_ring *rng;    /* ring carrying mbuf pointers */
	rte_atomic64_t rx_pkts;  /* packets dequeued on this queue */
	rte_atomic64_t tx_pkts;  /* packets enqueued on this queue */
	rte_atomic64_t err_pkts; /* TX packets that did not fit in the ring */
};

/* Per-device private data, allocated in rte_eth_from_rings(). */
struct pmd_internals {
	unsigned nb_rx_queues;
	unsigned nb_tx_queues;

	struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
	struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];
};
66
67
/* All-zero MAC address shared by every ring device (single entry). */
static struct ether_addr eth_addr = { .addr_bytes = {0} };
static const char *drivername = "Rings PMD";
/* Link state template; link_status is toggled by dev start/stop. */
static struct rte_eth_link pmd_link = {
		.link_speed = 10000,
		.link_duplex = ETH_LINK_FULL_DUPLEX,
		.link_status = 0
};
75
76 static uint16_t
77 eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
78 {
79         void **ptrs = (void *)&bufs[0];
80         struct ring_queue *r = q;
81         const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
82                         ptrs, nb_bufs);
83         if (r->rng->flags & RING_F_SC_DEQ)
84                 r->rx_pkts.cnt += nb_rx;
85         else
86                 rte_atomic64_add(&(r->rx_pkts), nb_rx);
87         return nb_rx;
88 }
89
90 static uint16_t
91 eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
92 {
93         void **ptrs = (void *)&bufs[0];
94         struct ring_queue *r = q;
95         const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
96                         ptrs, nb_bufs);
97         if (r->rng->flags & RING_F_SP_ENQ) {
98                 r->tx_pkts.cnt += nb_tx;
99                 r->err_pkts.cnt += nb_bufs - nb_tx;
100         } else {
101                 rte_atomic64_add(&(r->tx_pkts), nb_tx);
102                 rte_atomic64_add(&(r->err_pkts), nb_bufs - nb_tx);
103         }
104         return nb_tx;
105 }
106
107 static int
108 eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }
109
110 static int
111 eth_dev_start(struct rte_eth_dev *dev)
112 {
113         dev->data->dev_link.link_status = 1;
114         return 0;
115 }
116
117 static void
118 eth_dev_stop(struct rte_eth_dev *dev)
119 {
120         dev->data->dev_link.link_status = 0;
121 }
122
123 static int
124 eth_rx_queue_setup(struct rte_eth_dev *dev,uint16_t rx_queue_id,
125                                     uint16_t nb_rx_desc __rte_unused,
126                                     unsigned int socket_id __rte_unused,
127                                     const struct rte_eth_rxconf *rx_conf __rte_unused,
128                                     struct rte_mempool *mb_pool __rte_unused)
129 {
130         struct pmd_internals *internals = dev->data->dev_private;
131         dev->data->rx_queues[rx_queue_id] = &internals->rx_ring_queues[rx_queue_id];
132         return 0;
133 }
134
135 static int
136 eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
137                                     uint16_t nb_tx_desc __rte_unused,
138                                     unsigned int socket_id __rte_unused,
139                                     const struct rte_eth_txconf *tx_conf __rte_unused)
140 {
141         struct pmd_internals *internals = dev->data->dev_private;
142         dev->data->tx_queues[tx_queue_id] = &internals->tx_ring_queues[tx_queue_id];
143         return 0;
144 }
145
146
147 static void
148 eth_dev_info(struct rte_eth_dev *dev,
149                 struct rte_eth_dev_info *dev_info)
150 {
151         struct pmd_internals *internals = dev->data->dev_private;
152         dev_info->driver_name = drivername;
153         dev_info->max_mac_addrs = 1;
154         dev_info->max_rx_pktlen = (uint32_t)-1;
155         dev_info->max_rx_queues = (uint16_t)internals->nb_rx_queues;
156         dev_info->max_tx_queues = (uint16_t)internals->nb_tx_queues;
157         dev_info->min_rx_bufsize = 0;
158         dev_info->pci_dev = NULL;
159 }
160
161 static void
162 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
163 {
164         unsigned i;
165         unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
166         const struct pmd_internals *internal = dev->data->dev_private;
167
168         for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
169                         i < internal->nb_rx_queues; i++) {
170                 igb_stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
171                 rx_total += igb_stats->q_ipackets[i];
172         }
173
174         for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
175                         i < internal->nb_tx_queues; i++) {
176                 igb_stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
177                 igb_stats->q_errors[i] = internal->tx_ring_queues[i].err_pkts.cnt;
178                 tx_total += igb_stats->q_opackets[i];
179                 tx_err_total += igb_stats->q_errors[i];
180         }
181
182         igb_stats->ipackets = rx_total;
183         igb_stats->opackets = tx_total;
184         igb_stats->oerrors = tx_err_total;
185 }
186
187 static void
188 eth_stats_reset(struct rte_eth_dev *dev)
189 {
190         unsigned i;
191         struct pmd_internals *internal = dev->data->dev_private;
192         for (i = 0; i < internal->nb_rx_queues; i++)
193                 internal->rx_ring_queues[i].rx_pkts.cnt = 0;
194         for (i = 0; i < internal->nb_tx_queues; i++) {
195                 internal->tx_ring_queues[i].tx_pkts.cnt = 0;
196                 internal->tx_ring_queues[i].err_pkts.cnt = 0;
197         }
198 }
199
200 static void
201 eth_queue_release(void *q __rte_unused) { ; }
202 static int
203 eth_link_update(struct rte_eth_dev *dev __rte_unused,
204                 int wait_to_complete __rte_unused) { return 0; }
205
206 static struct eth_dev_ops ops = {
207                 .dev_start = eth_dev_start,
208                 .dev_stop = eth_dev_stop,
209                 .dev_configure = eth_dev_configure,
210                 .dev_infos_get = eth_dev_info,
211                 .rx_queue_setup = eth_rx_queue_setup,
212                 .tx_queue_setup = eth_tx_queue_setup,
213                 .rx_queue_release = eth_queue_release,
214                 .tx_queue_release = eth_queue_release,
215                 .link_update = eth_link_update,
216                 .stats_get = eth_stats_get,
217                 .stats_reset = eth_stats_reset,
218 };
219
220 int
221 rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
222                 const unsigned nb_rx_queues,
223                 struct rte_ring *const tx_queues[],
224                 const unsigned nb_tx_queues,
225                 const unsigned numa_node)
226 {
227         struct rte_eth_dev_data *data = NULL;
228         struct rte_pci_device *pci_dev = NULL;
229         struct pmd_internals *internals = NULL;
230         struct rte_eth_dev *eth_dev = NULL;
231         unsigned i;
232
233         /* do some parameter checking */
234         if (rx_queues == NULL && nb_rx_queues > 0)
235                 goto error;
236         if (tx_queues == NULL && nb_tx_queues > 0)
237                 goto error;
238
239         RTE_LOG(INFO, PMD, "Creating rings-backed ethdev on numa socket %u\n",
240                         numa_node);
241
242         /* now do all data allocation - for eth_dev structure, dummy pci driver
243          * and internal (private) data
244          */
245         data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
246         if (data == NULL)
247                 goto error;
248
249         pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, numa_node);
250         if (pci_dev == NULL)
251                 goto error;
252
253         internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
254         if (internals == NULL)
255                 goto error;
256
257         /* reserve an ethdev entry */
258         eth_dev = rte_eth_dev_allocate(name);
259         if (eth_dev == NULL)
260                 goto error;
261
262         /* now put it all together
263          * - store queue data in internals,
264          * - store numa_node info in pci_driver
265          * - point eth_dev_data to internals and pci_driver
266          * - and point eth_dev structure to new eth_dev_data structure
267          */
268         /* NOTE: we'll replace the data element, of originally allocated eth_dev
269          * so the rings are local per-process */
270
271         internals->nb_rx_queues = nb_rx_queues;
272         internals->nb_tx_queues = nb_tx_queues;
273         for (i = 0; i < nb_rx_queues; i++) {
274                 internals->rx_ring_queues[i].rng = rx_queues[i];
275         }
276         for (i = 0; i < nb_tx_queues; i++) {
277                 internals->tx_ring_queues[i].rng = tx_queues[i];
278         }
279
280         pci_dev->numa_node = numa_node;
281
282         data->dev_private = internals;
283         data->port_id = eth_dev->data->port_id;
284         data->nb_rx_queues = (uint16_t)nb_rx_queues;
285         data->nb_tx_queues = (uint16_t)nb_tx_queues;
286         data->dev_link = pmd_link;
287         data->mac_addrs = &eth_addr;
288
289         eth_dev ->data = data;
290         eth_dev ->dev_ops = &ops;
291         eth_dev ->pci_dev = pci_dev;
292
293         /* finally assign rx and tx ops */
294         eth_dev->rx_pkt_burst = eth_ring_rx;
295         eth_dev->tx_pkt_burst = eth_ring_tx;
296
297         return 0;
298
299 error:
300         if (data)
301                 rte_free(data);
302         if (pci_dev)
303                 rte_free(pci_dev);
304         if (internals)
305                 rte_free(internals);
306         return -1;
307 }
308
/* How eth_dev_ring_create() obtains its backing rings. */
enum dev_action{
	DEV_CREATE,	/* create new rte_rings */
	DEV_ATTACH	/* look up existing rte_rings by name */
};
313
314 static int
315 eth_dev_ring_create(const char *name, const unsigned numa_node,
316                 enum dev_action action)
317 {
318         /* rx and tx are so-called from point of view of first port.
319          * They are inverted from the point of view of second port
320          */
321         struct rte_ring *rxtx[RTE_PMD_RING_MAX_RX_RINGS];
322         unsigned i;
323         char rng_name[RTE_RING_NAMESIZE];
324         unsigned num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
325                         RTE_PMD_RING_MAX_TX_RINGS);
326
327         for (i = 0; i < num_rings; i++) {
328                 snprintf(rng_name, sizeof(rng_name), "ETH_RXTX%u_%s", i, name);
329                 rxtx[i] = (action == DEV_CREATE) ?
330                                 rte_ring_create(rng_name, 1024, numa_node,
331                                                 RING_F_SP_ENQ|RING_F_SC_DEQ) :
332                                 rte_ring_lookup(rng_name);
333                 if (rxtx[i] == NULL)
334                         return -1;
335         }
336
337         if (rte_eth_from_rings(name, rxtx, num_rings, rxtx, num_rings, numa_node))
338                 return -1;
339
340         return 0;
341 }
342
343
/*
 * Create a back-to-back pair of ethdevs sharing rings: port 1's RX
 * rings are port 2's TX rings and vice versa. Rings are created
 * (DEV_CREATE) or looked up (DEV_ATTACH) by derived name.
 *
 * NOTE(review): the two ethdevs are named after rx_rng_name/tx_rng_name,
 * which after the loop hold the name of the LAST ring created (e.g.
 * "ETH_RX<n-1>_<name>"), not the caller-supplied name — looks
 * accidental but callers may depend on it; confirm before changing.
 */
static int
eth_dev_ring_pair_create(const char *name, const unsigned numa_node,
		enum dev_action action)
{
	/* rx and tx are so-called from point of view of first port.
	 * They are inverted from the point of view of second port
	 */
	struct rte_ring *rx[RTE_PMD_RING_MAX_RX_RINGS];
	struct rte_ring *tx[RTE_PMD_RING_MAX_TX_RINGS];
	unsigned i;
	char rx_rng_name[RTE_RING_NAMESIZE];
	char tx_rng_name[RTE_RING_NAMESIZE];
	unsigned num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
			RTE_PMD_RING_MAX_TX_RINGS);

	for (i = 0; i < num_rings; i++) {
		snprintf(rx_rng_name, sizeof(rx_rng_name), "ETH_RX%u_%s", i, name);
		rx[i] = (action == DEV_CREATE) ?
				rte_ring_create(rx_rng_name, 1024, numa_node,
						RING_F_SP_ENQ|RING_F_SC_DEQ) :
				rte_ring_lookup(rx_rng_name);
		if (rx[i] == NULL)
			return -1;
		snprintf(tx_rng_name, sizeof(tx_rng_name), "ETH_TX%u_%s", i, name);
		tx[i] = (action == DEV_CREATE) ?
				rte_ring_create(tx_rng_name, 1024, numa_node,
						RING_F_SP_ENQ|RING_F_SC_DEQ):
				rte_ring_lookup(tx_rng_name);
		if (tx[i] == NULL)
			return -1;
	}

	/* port 1: rx rings for RX, tx rings for TX; port 2: swapped */
	if (rte_eth_from_rings(rx_rng_name, rx, num_rings, tx, num_rings,
			numa_node) || rte_eth_from_rings(tx_rng_name, tx, num_rings, rx,
					num_rings, numa_node))
		return -1;

	return 0;
}
383
384 int
385 rte_eth_ring_pair_create(const char *name, const unsigned numa_node)
386 {
387         RTE_LOG(WARNING, PMD, "rte_eth_ring_pair_create is deprecated\n");
388         return eth_dev_ring_pair_create(name, numa_node, DEV_CREATE);
389 }
390
391 int
392 rte_eth_ring_pair_attach(const char *name, const unsigned numa_node)
393 {
394         RTE_LOG(WARNING, PMD, "rte_eth_ring_pair_attach is deprecated\n");
395         return eth_dev_ring_pair_create(name, numa_node, DEV_ATTACH);
396 }
397
/* One parsed "name:node:action" triple from the devargs string. */
struct node_action_pair {
	char name[PATH_MAX];
	unsigned node;          /* NUMA node parsed from the value */
	enum dev_action action; /* DEV_CREATE or DEV_ATTACH */
};

/* Accumulator passed through rte_kvargs_process() to parse_kvlist(). */
struct node_action_list {
	unsigned total;                /* capacity of list[] */
	unsigned count;                /* entries filled so far */
	struct node_action_pair *list; /* points just past this struct */
};
409
410 static int parse_kvlist (const char *key __rte_unused, const char *value, void *data)
411 {
412         struct node_action_list *info = data;
413         int ret;
414         char *name;
415         char *action;
416         char *node;
417         char *end;
418
419         name = strdup(value);
420
421         ret = -EINVAL;
422
423         if (!name) {
424                 RTE_LOG(WARNING, PMD, "command line paramter is empty for ring pmd!\n");
425                 goto out;
426         }
427
428         node = strchr(name, ':');
429         if (!node) {
430                 RTE_LOG(WARNING, PMD, "could not parse node value from %s", name);
431                 goto out;
432         }
433
434         *node = '\0';
435         node++;
436
437         action = strchr(node, ':');
438         if (!action) {
439                 RTE_LOG(WARNING, PMD, "could not action value from %s", node);
440                 goto out;
441         }
442
443         *action = '\0';
444         action++;
445
446         /*
447          * Need to do some sanity checking here
448          */
449
450         if (strcmp(action, ETH_RING_ACTION_ATTACH) == 0)
451                 info->list[info->count].action = DEV_ATTACH;
452         else if (strcmp(action, ETH_RING_ACTION_CREATE) == 0)
453                 info->list[info->count].action = DEV_CREATE;
454         else
455                 goto out;
456
457         errno = 0;
458         info->list[info->count].node = strtol(node, &end, 10);
459
460         if ((errno != 0) || (*end != '\0')) {
461                 RTE_LOG(WARNING, PMD, "node value %s is unparseable as a number\n", node);
462                 goto out;
463         }
464
465         snprintf(info->list[info->count].name, sizeof(info->list[info->count].name), "%s", name);
466
467         info->count++;
468
469         ret = 0;
470 out:
471         free(name);
472         return ret;
473 }
474
475 static int
476 rte_pmd_ring_devinit(const char *name, const char *params)
477 {
478         struct rte_kvargs *kvlist;
479         int ret = 0;
480         struct node_action_list *info = NULL;
481
482         RTE_LOG(INFO, PMD, "Initializing pmd_ring for %s\n", name);
483
484         if (params == NULL || params[0] == '\0')
485                 eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE);
486         else {
487                 kvlist = rte_kvargs_parse(params, valid_arguments);
488
489                 if (!kvlist) {
490                         RTE_LOG(INFO, PMD, "Ignoring unsupported parameters when creating"
491                                         " rings-backed ethernet device\n");
492                         eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE);
493                         return 0;
494                 } else {
495                         ret = rte_kvargs_count(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG);
496                         info = rte_zmalloc("struct node_action_list", sizeof(struct node_action_list) +
497                                            (sizeof(struct node_action_pair) * ret), 0);
498                         if (!info)
499                                 goto out;
500
501                         info->total = ret;
502                         info->list = (struct node_action_pair*)(info + 1);
503
504                         ret = rte_kvargs_process(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG,
505                                                  parse_kvlist, info);
506
507                         if (ret < 0)
508                                 goto out_free;
509
510                         for (info->count = 0; info->count < info->total; info->count++) {
511                                 eth_dev_ring_create(name, info->list[info->count].node,
512                                                     info->list[info->count].action);
513                         }
514                 }
515         }
516
517 out_free:
518         rte_free(info);
519 out:
520         return ret;
521 }
522
/* Register this PMD as the "eth_ring" virtual device driver. */
static struct rte_driver pmd_ring_drv = {
	.name = "eth_ring",
	.type = PMD_VDEV,
	.init = rte_pmd_ring_devinit,
};

PMD_REGISTER_DRIVER(pmd_ring_drv);