lib/librte_pmd_ring/rte_eth_ring.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "rte_eth_ring.h"
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_vdev.h>

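/*
 * Each RX or TX queue maps directly onto one rte_ring. The 64-bit
 * counters record packets passed through the queue and, on TX,
 * packets that could not be enqueued.
 */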
struct ring_queue {
        struct rte_ring *rng;
        rte_atomic64_t rx_pkts;
        rte_atomic64_t tx_pkts;
        rte_atomic64_t err_pkts;
};

struct pmd_internals {
        unsigned nb_rx_queues;
        unsigned nb_tx_queues;

        struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
        struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];
};

static struct ether_addr eth_addr = { .addr_bytes = {0} };
static const char *drivername = "Rings PMD";
static struct rte_eth_link pmd_link = {
                .link_speed = 10000,
                .link_duplex = ETH_LINK_FULL_DUPLEX,
                .link_status = 0
};

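/*
 * RX burst: packets are "received" by dequeuing mbuf pointers from the
 * backing ring. If the ring is single-consumer, no other thread can be
 * polling this queue, so the counter is updated without an atomic op.
 */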
static uint16_t
eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        void **ptrs = (void *)&bufs[0];
        struct ring_queue *r = q;
        const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
                        ptrs, nb_bufs);
        if (r->rng->flags & RING_F_SC_DEQ)
                r->rx_pkts.cnt += nb_rx;
        else
                rte_atomic64_add(&(r->rx_pkts), nb_rx);
        return nb_rx;
}

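/*
 * TX burst: packets are "sent" by enqueuing mbuf pointers onto the
 * backing ring. Mbufs that do not fit are counted as errors; as with
 * any ethdev TX burst, the caller remains responsible for freeing them.
 */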
static uint16_t
eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        void **ptrs = (void *)&bufs[0];
        struct ring_queue *r = q;
        const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
                        ptrs, nb_bufs);
        if (r->rng->flags & RING_F_SP_ENQ) {
                r->tx_pkts.cnt += nb_tx;
                r->err_pkts.cnt += nb_bufs - nb_tx;
        } else {
                rte_atomic64_add(&(r->tx_pkts), nb_tx);
                rte_atomic64_add(&(r->err_pkts), nb_bufs - nb_tx);
        }
        return nb_tx;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }

static int
eth_dev_start(struct rte_eth_dev *dev)
{
        dev->data->dev_link.link_status = 1;
        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        dev->data->dev_link.link_status = 0;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                                    uint16_t nb_rx_desc __rte_unused,
                                    unsigned int socket_id __rte_unused,
                                    const struct rte_eth_rxconf *rx_conf __rte_unused,
                                    struct rte_mempool *mb_pool __rte_unused)
{
        struct pmd_internals *internals = dev->data->dev_private;
        dev->data->rx_queues[rx_queue_id] = &internals->rx_ring_queues[rx_queue_id];
        return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                                    uint16_t nb_tx_desc __rte_unused,
                                    unsigned int socket_id __rte_unused,
                                    const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct pmd_internals *internals = dev->data->dev_private;
        dev->data->tx_queues[tx_queue_id] = &internals->tx_ring_queues[tx_queue_id];
        return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev,
                struct rte_eth_dev_info *dev_info)
{
        struct pmd_internals *internals = dev->data->dev_private;
        dev_info->driver_name = drivername;
        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = (uint16_t)internals->nb_rx_queues;
        dev_info->max_tx_queues = (uint16_t)internals->nb_tx_queues;
        dev_info->min_rx_bufsize = 0;
        dev_info->pci_dev = NULL;
}

static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
        unsigned i;
        unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
        const struct pmd_internals *internal = dev->data->dev_private;

        memset(igb_stats, 0, sizeof(*igb_stats));
        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < internal->nb_rx_queues; i++) {
                igb_stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
                rx_total += igb_stats->q_ipackets[i];
        }

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < internal->nb_tx_queues; i++) {
                igb_stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
                igb_stats->q_errors[i] = internal->tx_ring_queues[i].err_pkts.cnt;
                tx_total += igb_stats->q_opackets[i];
                tx_err_total += igb_stats->q_errors[i];
        }

        igb_stats->ipackets = rx_total;
        igb_stats->opackets = tx_total;
        igb_stats->oerrors = tx_err_total;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
        unsigned i;
        struct pmd_internals *internal = dev->data->dev_private;
        for (i = 0; i < internal->nb_rx_queues; i++)
                internal->rx_ring_queues[i].rx_pkts.cnt = 0;
        for (i = 0; i < internal->nb_tx_queues; i++) {
                internal->tx_ring_queues[i].tx_pkts.cnt = 0;
                internal->tx_ring_queues[i].err_pkts.cnt = 0;
        }
}

static void
eth_queue_release(void *q __rte_unused) { ; }
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused) { return 0; }

static struct eth_dev_ops ops = {
                .dev_start = eth_dev_start,
                .dev_stop = eth_dev_stop,
                .dev_configure = eth_dev_configure,
                .dev_infos_get = eth_dev_info,
                .rx_queue_setup = eth_rx_queue_setup,
                .tx_queue_setup = eth_tx_queue_setup,
                .rx_queue_release = eth_queue_release,
                .tx_queue_release = eth_queue_release,
                .link_update = eth_link_update,
                .stats_get = eth_stats_get,
                .stats_reset = eth_stats_reset,
};

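/*
 * Build an ethdev from caller-supplied arrays of rte_rings. Returns 0
 * on success, -1 on bad parameters or allocation failure.
 *
 * A minimal usage sketch (the ring name and size are illustrative only,
 * not part of this API):
 *
 *	struct rte_ring *r = rte_ring_create("R0", 1024, rte_socket_id(),
 *			RING_F_SP_ENQ | RING_F_SC_DEQ);
 *	if (r != NULL)
 *		rte_eth_from_rings(&r, 1, &r, 1, rte_socket_id());
 */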
int
rte_eth_from_rings(struct rte_ring *const rx_queues[],
                const unsigned nb_rx_queues,
                struct rte_ring *const tx_queues[],
                const unsigned nb_tx_queues,
                const unsigned numa_node)
{
        struct rte_eth_dev_data *data = NULL;
        struct rte_pci_device *pci_dev = NULL;
        struct pmd_internals *internals = NULL;
        struct rte_eth_dev *eth_dev = NULL;
        unsigned i;

        /* do some parameter checking */
        if (rx_queues == NULL && nb_rx_queues > 0)
                goto error;
        if (tx_queues == NULL && nb_tx_queues > 0)
                goto error;

        RTE_LOG(INFO, PMD, "Creating rings-backed ethdev on numa socket %u\n",
                        numa_node);

        /* now do all data allocation - for eth_dev structure, dummy pci driver
         * and internal (private) data
         */
        data = rte_zmalloc_socket(NULL, sizeof(*data), 0, numa_node);
        if (data == NULL)
                goto error;

        pci_dev = rte_zmalloc_socket(NULL, sizeof(*pci_dev), 0, numa_node);
        if (pci_dev == NULL)
                goto error;

        internals = rte_zmalloc_socket(NULL, sizeof(*internals), 0, numa_node);
        if (internals == NULL)
                goto error;

        /* reserve an ethdev entry */
        eth_dev = rte_eth_dev_allocate();
        if (eth_dev == NULL)
                goto error;

        /* now put it all together
         * - store queue data in internals,
         * - store numa_node info in pci_driver
         * - point eth_dev_data to internals and pci_driver
         * - and point eth_dev structure to new eth_dev_data structure
         */
        /* NOTE: we replace the data element of the originally allocated
         * eth_dev, so that the rings are local to this process */

        internals->nb_rx_queues = nb_rx_queues;
        internals->nb_tx_queues = nb_tx_queues;
        for (i = 0; i < nb_rx_queues; i++) {
                internals->rx_ring_queues[i].rng = rx_queues[i];
        }
        for (i = 0; i < nb_tx_queues; i++) {
                internals->tx_ring_queues[i].rng = tx_queues[i];
        }

        pci_dev->numa_node = numa_node;

        data->dev_private = internals;
        data->port_id = eth_dev->data->port_id;
        data->nb_rx_queues = (uint16_t)nb_rx_queues;
        data->nb_tx_queues = (uint16_t)nb_tx_queues;
        data->dev_link = pmd_link;
        data->mac_addrs = &eth_addr;

        eth_dev->data = data;
        eth_dev->dev_ops = &ops;
        eth_dev->pci_dev = pci_dev;

        /* finally assign rx and tx ops */
        eth_dev->rx_pkt_burst = eth_ring_rx;
        eth_dev->tx_pkt_burst = eth_ring_tx;

        return 0;

error:
        if (data)
                rte_free(data);
        if (pci_dev)
                rte_free(pci_dev);
        if (internals)
                rte_free(internals);
        return -1;
}

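/*
 * DEV_CREATE allocates new rings; DEV_ATTACH looks up rings of the same
 * names created earlier, typically by another process sharing the same
 * hugepage memory.
 */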
enum dev_action {
        DEV_CREATE,
        DEV_ATTACH
};

static int
eth_dev_ring_create(const char *name, const unsigned numa_node,
                enum dev_action action)
{
        /* a single set of rings serves as both rx and tx here, so packets
         * transmitted on the device are looped back to its receive side
         */
        struct rte_ring *rxtx[RTE_PMD_RING_MAX_RX_RINGS];
        unsigned i;
        char rng_name[RTE_RING_NAMESIZE];
        unsigned num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
                        RTE_PMD_RING_MAX_TX_RINGS);

        for (i = 0; i < num_rings; i++) {
                rte_snprintf(rng_name, sizeof(rng_name), "ETH_RXTX%u_%s", i, name);
                rxtx[i] = (action == DEV_CREATE) ?
                                rte_ring_create(rng_name, 1024, numa_node,
                                                RING_F_SP_ENQ|RING_F_SC_DEQ) :
                                rte_ring_lookup(rng_name);
                if (rxtx[i] == NULL)
                        return -1;
        }

        if (rte_eth_from_rings(rxtx, num_rings, rxtx, num_rings, numa_node))
                return -1;

        return 0;
}

static int
eth_dev_ring_pair_create(const char *name, const unsigned numa_node,
                enum dev_action action)
{
        /* the rings are named rx and tx from the point of view of the first
         * port; from the point of view of the second port they are inverted
         */
        struct rte_ring *rx[RTE_PMD_RING_MAX_RX_RINGS];
        struct rte_ring *tx[RTE_PMD_RING_MAX_TX_RINGS];
        unsigned i;
        char rng_name[RTE_RING_NAMESIZE];
        unsigned num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
                        RTE_PMD_RING_MAX_TX_RINGS);

        for (i = 0; i < num_rings; i++) {
                rte_snprintf(rng_name, sizeof(rng_name), "ETH_RX%u_%s", i, name);
                rx[i] = (action == DEV_CREATE) ?
                                rte_ring_create(rng_name, 1024, numa_node,
                                                RING_F_SP_ENQ|RING_F_SC_DEQ) :
                                rte_ring_lookup(rng_name);
                if (rx[i] == NULL)
                        return -1;
                rte_snprintf(rng_name, sizeof(rng_name), "ETH_TX%u_%s", i, name);
                tx[i] = (action == DEV_CREATE) ?
                                rte_ring_create(rng_name, 1024, numa_node,
                                                RING_F_SP_ENQ|RING_F_SC_DEQ) :
                                rte_ring_lookup(rng_name);
                if (tx[i] == NULL)
                        return -1;
        }

        if (rte_eth_from_rings(rx, num_rings, tx, num_rings, numa_node) ||
                        rte_eth_from_rings(tx, num_rings, rx, num_rings, numa_node))
                return -1;

        return 0;
}

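/*
 * Public pair API: _create makes two back-to-back devices on freshly
 * allocated rings; _attach connects to the rings of an existing pair.
 */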
int
rte_eth_ring_pair_create(const char *name, const unsigned numa_node)
{
        return eth_dev_ring_pair_create(name, numa_node, DEV_CREATE);
}

int
rte_eth_ring_pair_attach(const char *name, const unsigned numa_node)
{
        return eth_dev_ring_pair_create(name, numa_node, DEV_ATTACH);
}

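/*
 * Entry point invoked by the EAL virtual-device framework for each
 * device registered under this driver's name; device parameters are
 * not supported and are ignored.
 */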
int
rte_pmd_ring_devinit(const char *name, const char *params)
{
        RTE_LOG(INFO, PMD, "Initializing pmd_ring for %s\n", name);

        if (params != NULL && params[0] != '\0')
                RTE_LOG(INFO, PMD, "Ignoring unsupported parameters when creating"
                                " rings-backed ethernet device\n");

        return eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE);
}

static struct rte_vdev_driver pmd_ring_drv = {
        .name = "eth_ring",
        .init = rte_pmd_ring_devinit,
};

__attribute__((constructor))
static void
rte_pmd_ring_init(void)
{
        rte_eal_vdev_driver_register(&pmd_ring_drv);
}