lib/librte_pmd_ring/rte_eth_ring.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* standard headers for the libc calls used below (strdup, strtol, errno, PATH_MAX) */
#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include "rte_eth_ring.h"
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_kvargs.h>

#define ETH_RING_NUMA_NODE_ACTION_ARG   "nodeaction"
#define ETH_RING_ACTION_CREATE          "CREATE"
#define ETH_RING_ACTION_ATTACH          "ATTACH"

static const char *valid_arguments[] = {
        ETH_RING_NUMA_NODE_ACTION_ARG,
        NULL
};

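/* each device queue simply wraps an rte_ring plus its packet counters */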
struct ring_queue {
        struct rte_ring *rng;
        rte_atomic64_t rx_pkts;
        rte_atomic64_t tx_pkts;
        rte_atomic64_t err_pkts;
};

struct pmd_internals {
        unsigned nb_rx_queues;
        unsigned nb_tx_queues;

        struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
        struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];
};


static struct ether_addr eth_addr = { .addr_bytes = {0} };
static const char *drivername = "Rings PMD";
static struct rte_eth_link pmd_link = {
                .link_speed = 10000,
                .link_duplex = ETH_LINK_FULL_DUPLEX,
                .link_status = 0
};

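/*
 * rx burst: dequeue up to nb_bufs mbuf pointers from the queue's ring.
 * If the ring was created single-consumer (RING_F_SC_DEQ) the counter
 * can be updated without an atomic add.
 */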
static uint16_t
eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        void **ptrs = (void *)&bufs[0];
        struct ring_queue *r = q;
        const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
                        ptrs, nb_bufs);
        if (r->rng->flags & RING_F_SC_DEQ)
                r->rx_pkts.cnt += nb_rx;
        else
                rte_atomic64_add(&(r->rx_pkts), nb_rx);
        return nb_rx;
}

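/*
 * tx burst: enqueue up to nb_bufs mbuf pointers onto the queue's ring.
 * Packets that do not fit are counted as errors; as on the rx side, a
 * single-producer ring (RING_F_SP_ENQ) avoids the atomic adds.
 */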
static uint16_t
eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        void **ptrs = (void *)&bufs[0];
        struct ring_queue *r = q;
        const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
                        ptrs, nb_bufs);
        if (r->rng->flags & RING_F_SP_ENQ) {
                r->tx_pkts.cnt += nb_tx;
                r->err_pkts.cnt += nb_bufs - nb_tx;
        } else {
                rte_atomic64_add(&(r->tx_pkts), nb_tx);
                rte_atomic64_add(&(r->err_pkts), nb_bufs - nb_tx);
        }
        return nb_tx;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }

static int
eth_dev_start(struct rte_eth_dev *dev)
{
        dev->data->dev_link.link_status = 1;
        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        dev->data->dev_link.link_status = 0;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_rxconf *rx_conf __rte_unused,
                struct rte_mempool *mb_pool __rte_unused)
{
        struct pmd_internals *internals = dev->data->dev_private;
        dev->data->rx_queues[rx_queue_id] = &internals->rx_ring_queues[rx_queue_id];
        return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                uint16_t nb_tx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct pmd_internals *internals = dev->data->dev_private;
        dev->data->tx_queues[tx_queue_id] = &internals->tx_ring_queues[tx_queue_id];
        return 0;
}


static void
eth_dev_info(struct rte_eth_dev *dev,
                struct rte_eth_dev_info *dev_info)
{
        struct pmd_internals *internals = dev->data->dev_private;
        dev_info->driver_name = drivername;
        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = (uint16_t)internals->nb_rx_queues;
        dev_info->max_tx_queues = (uint16_t)internals->nb_tx_queues;
        dev_info->min_rx_bufsize = 0;
        dev_info->pci_dev = NULL;
}

static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
        unsigned i;
        unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
        const struct pmd_internals *internal = dev->data->dev_private;

        memset(igb_stats, 0, sizeof(*igb_stats));
        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < internal->nb_rx_queues; i++) {
                igb_stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
                rx_total += igb_stats->q_ipackets[i];
        }

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < internal->nb_tx_queues; i++) {
                igb_stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
                igb_stats->q_errors[i] = internal->tx_ring_queues[i].err_pkts.cnt;
                tx_total += igb_stats->q_opackets[i];
                tx_err_total += igb_stats->q_errors[i];
        }

        igb_stats->ipackets = rx_total;
        igb_stats->opackets = tx_total;
        igb_stats->oerrors = tx_err_total;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
        unsigned i;
        struct pmd_internals *internal = dev->data->dev_private;
        for (i = 0; i < internal->nb_rx_queues; i++)
                internal->rx_ring_queues[i].rx_pkts.cnt = 0;
        for (i = 0; i < internal->nb_tx_queues; i++) {
                internal->tx_ring_queues[i].tx_pkts.cnt = 0;
                internal->tx_ring_queues[i].err_pkts.cnt = 0;
        }
}

static void
eth_queue_release(void *q __rte_unused) { ; }
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused) { return 0; }

static struct eth_dev_ops ops = {
                .dev_start = eth_dev_start,
                .dev_stop = eth_dev_stop,
                .dev_configure = eth_dev_configure,
                .dev_infos_get = eth_dev_info,
                .rx_queue_setup = eth_rx_queue_setup,
                .tx_queue_setup = eth_tx_queue_setup,
                .rx_queue_release = eth_queue_release,
                .tx_queue_release = eth_queue_release,
                .link_update = eth_link_update,
                .stats_get = eth_stats_get,
                .stats_reset = eth_stats_reset,
};

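/*
 * Create an ethdev whose queues are backed by the given arrays of rings.
 *
 * Illustrative usage sketch only (not part of the driver); it assumes an
 * application that has already run rte_eal_init(), and the ring name used
 * here is made up for the example:
 *
 *     struct rte_ring *r = rte_ring_create("app_ring", 1024, rte_socket_id(),
 *                     RING_F_SP_ENQ | RING_F_SC_DEQ);
 *     if (r == NULL || rte_eth_from_rings(&r, 1, &r, 1, rte_socket_id()) < 0)
 *             rte_exit(EXIT_FAILURE, "cannot create ring-backed ethdev\n");
 *
 * The new port is then configured and used through the normal rte_ethdev
 * API; because the same ring serves rx and tx in this sketch, transmitted
 * packets loop straight back to the receive side.
 */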
int
rte_eth_from_rings(struct rte_ring *const rx_queues[],
                const unsigned nb_rx_queues,
                struct rte_ring *const tx_queues[],
                const unsigned nb_tx_queues,
                const unsigned numa_node)
{
        struct rte_eth_dev_data *data = NULL;
        struct rte_pci_device *pci_dev = NULL;
        struct pmd_internals *internals = NULL;
        struct rte_eth_dev *eth_dev = NULL;
        unsigned i;

        /* do some parameter checking */
        if (rx_queues == NULL && nb_rx_queues > 0)
                goto error;
        if (tx_queues == NULL && nb_tx_queues > 0)
                goto error;

        RTE_LOG(INFO, PMD, "Creating rings-backed ethdev on numa socket %u\n",
                        numa_node);

        /* now do all data allocation - for eth_dev structure, dummy pci driver
         * and internal (private) data
         */
        data = rte_zmalloc_socket(NULL, sizeof(*data), 0, numa_node);
        if (data == NULL)
                goto error;

        pci_dev = rte_zmalloc_socket(NULL, sizeof(*pci_dev), 0, numa_node);
        if (pci_dev == NULL)
                goto error;

        internals = rte_zmalloc_socket(NULL, sizeof(*internals), 0, numa_node);
        if (internals == NULL)
                goto error;

        /* reserve an ethdev entry */
        eth_dev = rte_eth_dev_allocate();
        if (eth_dev == NULL)
                goto error;

        /* now put it all together
         * - store queue data in internals,
         * - store numa_node info in pci_driver
         * - point eth_dev_data to internals and pci_driver
         * - and point eth_dev structure to new eth_dev_data structure
         */
        /* NOTE: we'll replace the data element of the originally allocated
         * eth_dev, so the rings are local to this process */

        internals->nb_rx_queues = nb_rx_queues;
        internals->nb_tx_queues = nb_tx_queues;
        for (i = 0; i < nb_rx_queues; i++) {
                internals->rx_ring_queues[i].rng = rx_queues[i];
        }
        for (i = 0; i < nb_tx_queues; i++) {
                internals->tx_ring_queues[i].rng = tx_queues[i];
        }

        pci_dev->numa_node = numa_node;

        data->dev_private = internals;
        data->port_id = eth_dev->data->port_id;
        data->nb_rx_queues = (uint16_t)nb_rx_queues;
        data->nb_tx_queues = (uint16_t)nb_tx_queues;
        data->dev_link = pmd_link;
        data->mac_addrs = &eth_addr;

        eth_dev->data = data;
        eth_dev->dev_ops = &ops;
        eth_dev->pci_dev = pci_dev;

        /* finally assign rx and tx ops */
        eth_dev->rx_pkt_burst = eth_ring_rx;
        eth_dev->tx_pkt_burst = eth_ring_tx;

        return 0;

error:
        if (data)
                rte_free(data);
        if (pci_dev)
                rte_free(pci_dev);
        if (internals)
                rte_free(internals);
        return -1;
}

enum dev_action {
        DEV_CREATE,
        DEV_ATTACH
};

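/*
 * Back a single device with rings named "ETH_RXTX<n>_<name>".  With
 * DEV_CREATE the rings are allocated here; with DEV_ATTACH, rings of the
 * same names (e.g. created by a primary process) are looked up instead.
 */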
static int
eth_dev_ring_create(const char *name, const unsigned numa_node,
                enum dev_action action)
{
        /* the same set of rings is used for both the rx and tx side of
         * this port, so anything transmitted on it can be received back
         */
        struct rte_ring *rxtx[RTE_PMD_RING_MAX_RX_RINGS];
        unsigned i;
        char rng_name[RTE_RING_NAMESIZE];
        unsigned num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
                        RTE_PMD_RING_MAX_TX_RINGS);

        for (i = 0; i < num_rings; i++) {
                rte_snprintf(rng_name, sizeof(rng_name), "ETH_RXTX%u_%s", i, name);
                rxtx[i] = (action == DEV_CREATE) ?
                                rte_ring_create(rng_name, 1024, numa_node,
                                                RING_F_SP_ENQ|RING_F_SC_DEQ) :
                                rte_ring_lookup(rng_name);
                if (rxtx[i] == NULL)
                        return -1;
        }

        if (rte_eth_from_rings(rxtx, num_rings, rxtx, num_rings, numa_node))
                return -1;

        return 0;
}


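/*
 * Create (or attach to) two ports cross-connected through rings named
 * "ETH_RX<n>_<name>" and "ETH_TX<n>_<name>": whatever the first port
 * transmits, the second receives, and vice versa.
 */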
static int
eth_dev_ring_pair_create(const char *name, const unsigned numa_node,
                enum dev_action action)
{
        /* rx and tx are so-called from the point of view of the first port.
         * They are inverted from the point of view of the second port
         */
        struct rte_ring *rx[RTE_PMD_RING_MAX_RX_RINGS];
        struct rte_ring *tx[RTE_PMD_RING_MAX_TX_RINGS];
        unsigned i;
        char rng_name[RTE_RING_NAMESIZE];
        unsigned num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
                        RTE_PMD_RING_MAX_TX_RINGS);

        for (i = 0; i < num_rings; i++) {
                rte_snprintf(rng_name, sizeof(rng_name), "ETH_RX%u_%s", i, name);
                rx[i] = (action == DEV_CREATE) ?
                                rte_ring_create(rng_name, 1024, numa_node,
                                                RING_F_SP_ENQ|RING_F_SC_DEQ) :
                                rte_ring_lookup(rng_name);
                if (rx[i] == NULL)
                        return -1;
                rte_snprintf(rng_name, sizeof(rng_name), "ETH_TX%u_%s", i, name);
                tx[i] = (action == DEV_CREATE) ?
                                rte_ring_create(rng_name, 1024, numa_node,
                                                RING_F_SP_ENQ|RING_F_SC_DEQ) :
                                rte_ring_lookup(rng_name);
                if (tx[i] == NULL)
                        return -1;
        }

        if (rte_eth_from_rings(rx, num_rings, tx, num_rings, numa_node) ||
                        rte_eth_from_rings(tx, num_rings, rx, num_rings, numa_node))
                return -1;

        return 0;
}

int
rte_eth_ring_pair_create(const char *name, const unsigned numa_node)
{
        RTE_LOG(WARNING, PMD, "rte_eth_ring_pair_create is deprecated\n");
        return eth_dev_ring_pair_create(name, numa_node, DEV_CREATE);
}

int
rte_eth_ring_pair_attach(const char *name, const unsigned numa_node)
{
        RTE_LOG(WARNING, PMD, "rte_eth_ring_pair_attach is deprecated\n");
        return eth_dev_ring_pair_create(name, numa_node, DEV_ATTACH);
}

struct node_action_pair {
        char name[PATH_MAX];
        unsigned node;
        enum dev_action action;
};

struct node_action_list {
        unsigned total;
        unsigned count;
        struct node_action_pair *list;
};

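/*
 * Parse one "nodeaction" value of the form <name>:<node>:<action>, where
 * <action> is CREATE or ATTACH, e.g. "r1:0:CREATE" (the name "r1" here is
 * just an illustration).
 */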
static int parse_kvlist(const char *key __rte_unused, const char *value, void *data)
{
        struct node_action_list *info = data;
        int ret;
        char *name;
        char *action;
        char *node;
        char *end;

        name = strdup(value);

        ret = -EINVAL;

        if (!name) {
                RTE_LOG(WARNING, PMD, "command line parameter is empty for ring pmd!\n");
                goto out;
        }

        node = strchr(name, ':');
        if (!node) {
                RTE_LOG(WARNING, PMD, "could not parse node value from %s\n", name);
                goto out;
        }

        *node = '\0';
        node++;

        action = strchr(node, ':');
        if (!action) {
                RTE_LOG(WARNING, PMD, "could not parse action value from %s\n", node);
                goto out;
        }

        *action = '\0';
        action++;

        /*
         * sanity-check the action and node values
         */

        if (strcmp(action, ETH_RING_ACTION_ATTACH) == 0)
                info->list[info->count].action = DEV_ATTACH;
        else if (strcmp(action, ETH_RING_ACTION_CREATE) == 0)
                info->list[info->count].action = DEV_CREATE;
        else
                goto out;

        errno = 0;
        info->list[info->count].node = strtol(node, &end, 10);

        if ((errno != 0) || (*end != '\0')) {
                RTE_LOG(WARNING, PMD, "node value %s is unparseable as a number\n", node);
                goto out;
        }

        rte_snprintf(info->list[info->count].name, sizeof(info->list[info->count].name), "%s", name);

        info->count++;

        ret = 0;
out:
        free(name);
        return ret;
}

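/*
 * Driver init entry point for an "eth_ring" virtual device.  A loopback
 * device is always created on the caller's NUMA socket; in addition, each
 * "nodeaction=<name>:<node>:<action>" parameter creates (or attaches to) a
 * pair of ring-backed devices on the requested node.
 */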
int
rte_pmd_ring_devinit(const char *name, const char *params)
{
        struct rte_kvargs *kvlist = NULL;
        int ret = 0;
        struct node_action_list *info = NULL;

        RTE_LOG(INFO, PMD, "Initializing pmd_ring for %s\n", name);

        if (params == NULL || params[0] == '\0')
                eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE);
        else {
                kvlist = rte_kvargs_parse(params, valid_arguments);

                if (!kvlist) {
                        RTE_LOG(INFO, PMD, "Ignoring unsupported parameters when creating"
                                        " rings-backed ethernet device\n");
                        eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE);
                        return 0;
                } else {
                        eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE);
                        ret = rte_kvargs_count(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG);
                        info = rte_zmalloc("struct node_action_list", sizeof(struct node_action_list) +
                                           (sizeof(struct node_action_pair) * ret), 0);
                        if (!info)
                                goto out;

                        info->total = ret;
                        info->list = (struct node_action_pair *)(info + 1);

                        ret = rte_kvargs_process(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG,
                                                 parse_kvlist, info);

                        if (ret < 0)
                                goto out_free;

                        for (info->count = 0; info->count < info->total; info->count++) {
                                eth_dev_ring_pair_create(name, info->list[info->count].node,
                                                    info->list[info->count].action);
                        }
                }
        }

out_free:
        rte_free(info);
out:
        /* release the kvlist allocated by rte_kvargs_parse(), if any */
        if (kvlist)
                rte_kvargs_free(kvlist);
        return ret;
}

static struct rte_driver pmd_ring_drv = {
        .name = "eth_ring",
        .type = PMD_VDEV,
        .init = rte_pmd_ring_devinit,
};

PMD_REGISTER_DRIVER(pmd_ring_drv);