/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "rte_eth_ring.h"
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_errno.h>
#define ETH_RING_NUMA_NODE_ACTION_ARG	"nodeaction"
#define ETH_RING_ACTION_CREATE		"CREATE"
#define ETH_RING_ACTION_ATTACH		"ATTACH"
static const char *ring_ethdev_driver_name = "Ring PMD";
static const char *valid_arguments[] = {
	ETH_RING_NUMA_NODE_ACTION_ARG,
	NULL
};
struct ring_queue {
	struct rte_ring *rng;
	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};
struct pmd_internals {
	unsigned nb_rx_queues;
	unsigned nb_tx_queues;

	struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
	struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];

	struct ether_addr address;
};
static const char *drivername = "Rings PMD";
static struct rte_eth_link pmd_link = {
	.link_speed = 10000,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = 0
};
static uint16_t
eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	void **ptrs = (void *)&bufs[0];
	struct ring_queue *r = q;
	const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
			ptrs, nb_bufs);
	if (r->rng->flags & RING_F_SC_DEQ)
		r->rx_pkts.cnt += nb_rx;
	else
		rte_atomic64_add(&(r->rx_pkts), nb_rx);
	return nb_rx;
}
static uint16_t
eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	void **ptrs = (void *)&bufs[0];
	struct ring_queue *r = q;
	const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
			ptrs, nb_bufs);
	if (r->rng->flags & RING_F_SP_ENQ) {
		r->tx_pkts.cnt += nb_tx;
		r->err_pkts.cnt += nb_bufs - nb_tx;
	} else {
		rte_atomic64_add(&(r->tx_pkts), nb_tx);
		rte_atomic64_add(&(r->err_pkts), nb_bufs - nb_tx);
	}
	return nb_tx;
}
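/*
 * Note on the counters above: a ring created with RING_F_SP_ENQ or
 * RING_F_SC_DEQ is only ever touched by a single producer/consumer
 * thread on that side, so plain additions on the .cnt field are safe;
 * for multi-producer/multi-consumer rings the rte_atomic64_add() path
 * is used instead.
 */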
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }
static int
eth_dev_start(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = 1;
	return 0;
}
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = 0;
}
static int
eth_dev_set_link_down(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = 0;
	return 0;
}
static int
eth_dev_set_link_up(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = 1;
	return 0;
}
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;
	dev->data->rx_queues[rx_queue_id] = &internals->rx_ring_queues[rx_queue_id];
	return 0;
}
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;
	dev->data->tx_queues[tx_queue_id] = &internals->tx_ring_queues[tx_queue_id];
	return 0;
}
static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;
	dev_info->driver_name = drivername;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = (uint16_t)internals->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)internals->nb_tx_queues;
	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = NULL;
}
static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < internal->nb_rx_queues; i++) {
		igb_stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < internal->nb_tx_queues; i++) {
		igb_stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
		igb_stats->q_errors[i] = internal->tx_ring_queues[i].err_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;
}
static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < internal->nb_rx_queues; i++)
		internal->rx_ring_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < internal->nb_tx_queues; i++) {
		internal->tx_ring_queues[i].tx_pkts.cnt = 0;
		internal->tx_ring_queues[i].err_pkts.cnt = 0;
	}
}
static void
eth_mac_addr_remove(struct rte_eth_dev *dev __rte_unused,
	uint32_t index __rte_unused)
{
}

static void
eth_mac_addr_add(struct rte_eth_dev *dev __rte_unused,
	struct ether_addr *mac_addr __rte_unused,
	uint32_t index __rte_unused,
	uint32_t vmdq __rte_unused)
{
}
static void
eth_queue_release(void *q __rte_unused) { ; }
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_set_link_up = eth_dev_set_link_up,
	.dev_set_link_down = eth_dev_set_link_down,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.mac_addr_remove = eth_mac_addr_remove,
	.mac_addr_add = eth_mac_addr_add,
};
static struct eth_driver rte_ring_pmd = {
	.pci_drv = {
		.name = "rte_ring_pmd",
		.drv_flags = RTE_PCI_DRV_DETACHABLE,
	},
};
static struct rte_pci_id id_table;
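/*
 * In this DPDK generation even virtual devices are plumbed through the
 * PCI driver structures, so the PMD carries a dummy rte_pci_device and
 * the empty id_table above, which matches no real hardware; they exist
 * only so the generic ethdev code has something to point at.
 */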
int
rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
		const unsigned nb_rx_queues,
		struct rte_ring *const tx_queues[],
		const unsigned nb_tx_queues,
		const unsigned numa_node)
{
	struct rte_eth_dev_data *data = NULL;
	struct rte_pci_device *pci_dev = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	unsigned i;
	/* do some parameter checking */
	if (rx_queues == NULL && nb_rx_queues > 0) {
		rte_errno = EINVAL;
		goto error;
	}
	if (tx_queues == NULL && nb_tx_queues > 0) {
		rte_errno = EINVAL;
		goto error;
	}
	if (nb_rx_queues > RTE_PMD_RING_MAX_RX_RINGS) {
		rte_errno = EINVAL;
		goto error;
	}

	RTE_LOG(INFO, PMD, "Creating rings-backed ethdev on numa socket %u\n",
			numa_node);
	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	data->rx_queues = rte_zmalloc_socket(name, sizeof(void *) * nb_rx_queues,
			0, numa_node);
	if (data->rx_queues == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	data->tx_queues = rte_zmalloc_socket(name, sizeof(void *) * nb_tx_queues,
			0, numa_node);
	if (data->tx_queues == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, numa_node);
	if (pci_dev == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}
	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
	if (eth_dev == NULL) {
		rte_errno = ENOSPC;
		goto error;
	}
	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in eth_dev_data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev, so the rings are local per-process */
	internals->nb_rx_queues = nb_rx_queues;
	internals->nb_tx_queues = nb_tx_queues;
	for (i = 0; i < nb_rx_queues; i++) {
		internals->rx_ring_queues[i].rng = rx_queues[i];
		data->rx_queues[i] = &internals->rx_ring_queues[i];
	}
	for (i = 0; i < nb_tx_queues; i++) {
		internals->tx_ring_queues[i].rng = tx_queues[i];
		data->tx_queues[i] = &internals->tx_ring_queues[i];
	}
	rte_ring_pmd.pci_drv.name = ring_ethdev_driver_name;
	rte_ring_pmd.pci_drv.id_table = &id_table;

	pci_dev->numa_node = numa_node;
	pci_dev->driver = &rte_ring_pmd.pci_drv;
	data->dev_private = internals;
	data->port_id = eth_dev->data->port_id;
	memmove(data->name, eth_dev->data->name, sizeof(data->name));
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->address;
	eth_dev->data = data;
	eth_dev->driver = &rte_ring_pmd;
	eth_dev->dev_ops = &ops;
	/* attach the dummy pci device so it can be found and freed on uninit */
	eth_dev->pci_dev = pci_dev;
	eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
	eth_dev->data->kdrv = RTE_KDRV_NONE;
	eth_dev->data->drv_name = drivername;
	eth_dev->data->numa_node = numa_node;
	TAILQ_INIT(&(eth_dev->link_intr_cbs));

	/* finally assign rx and tx ops */
	eth_dev->rx_pkt_burst = eth_ring_rx;
	eth_dev->tx_pkt_burst = eth_ring_tx;

	return data->port_id;
error:
	if (data) {
		rte_free(data->rx_queues);
		rte_free(data->tx_queues);
	}
	rte_free(data);
	rte_free(pci_dev);
	rte_free(internals);
	return -1;
}
int
rte_eth_from_ring(struct rte_ring *r)
{
	/* a single ring serves as both the rx and the tx queue of the port */
	return rte_eth_from_rings(r->name, &r, 1, &r, 1,
			r->memzone ? r->memzone->socket_id : SOCKET_ID_ANY);
}
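/*
 * Usage sketch for the functions above (illustrative only, not part of
 * the PMD; the ring name "R0" and the sizes are arbitrary): create a
 * ring and expose it as an ethdev port, so mbufs transmitted on the
 * port can be received back from that same port.
 *
 *	struct rte_ring *r = rte_ring_create("R0", 256, rte_socket_id(),
 *			RING_F_SP_ENQ|RING_F_SC_DEQ);
 *	int port = rte_eth_from_ring(r);
 *	if (port < 0)
 *		rte_exit(EXIT_FAILURE, "cannot create ethdev from ring\n");
 */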
enum dev_action {
	DEV_CREATE,
	DEV_ATTACH
};

static int
eth_dev_ring_create(const char *name, const unsigned numa_node,
		enum dev_action action)
{
	/* rx and tx are so-called from the point of view of the first port;
	 * they are inverted from the point of view of the second port
	 */
	struct rte_ring *rxtx[RTE_PMD_RING_MAX_RX_RINGS];
	unsigned i;
	char rng_name[RTE_RING_NAMESIZE];
	unsigned num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
			RTE_PMD_RING_MAX_TX_RINGS);
	for (i = 0; i < num_rings; i++) {
		snprintf(rng_name, sizeof(rng_name), "ETH_RXTX%u_%s", i, name);
		rxtx[i] = (action == DEV_CREATE) ?
				rte_ring_create(rng_name, 1024, numa_node,
						RING_F_SP_ENQ|RING_F_SC_DEQ) :
				rte_ring_lookup(rng_name);
		if (rxtx[i] == NULL)
			return -1;
	}

	if (rte_eth_from_rings(name, rxtx, num_rings, rxtx, num_rings, numa_node) < 0)
		return -1;

	return 0;
}
struct node_action_pair {
	char name[PATH_MAX];
	unsigned node;
	enum dev_action action;
};

struct node_action_list {
	unsigned total;
	unsigned count;
	struct node_action_pair *list;
};
static int
parse_kvlist(const char *key __rte_unused, const char *value, void *data)
{
	struct node_action_list *info = data;
	int ret;
	char *name;
	char *action;
	char *node;
	char *end;

	name = strdup(value);

	ret = -EINVAL;

	if (!name) {
		RTE_LOG(WARNING, PMD, "command line parameter is empty for ring pmd!\n");
		goto out;
	}

	node = strchr(name, ':');
	if (!node) {
		RTE_LOG(WARNING, PMD, "could not parse node value from %s\n", name);
		goto out;
	}

	*node = '\0';
	node++;

	action = strchr(node, ':');
	if (!action) {
		RTE_LOG(WARNING, PMD, "could not parse action value from %s\n", node);
		goto out;
	}

	*action = '\0';
	action++;
	/*
	 * Need to do some sanity checking here
	 */

	if (strcmp(action, ETH_RING_ACTION_ATTACH) == 0)
		info->list[info->count].action = DEV_ATTACH;
	else if (strcmp(action, ETH_RING_ACTION_CREATE) == 0)
		info->list[info->count].action = DEV_CREATE;
	else
		goto out;

	errno = 0;
	info->list[info->count].node = strtol(node, &end, 10);

	if ((errno != 0) || (*end != '\0')) {
		RTE_LOG(WARNING, PMD, "node value %s is unparseable as a number\n", node);
		goto out;
	}

	snprintf(info->list[info->count].name, sizeof(info->list[info->count].name),
			"%s", name);

	info->count++;

	ret = 0;
out:
	free(name);
	return ret;
}
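/*
 * Example of the devargs this parser consumes (hedged: the exact EAL
 * option spelling varies between DPDK versions, and "r1" is just an
 * illustrative name):
 *
 *	--vdev=eth_ring0,nodeaction=r1:0:CREATE
 *
 * i.e. a colon-separated name:node:action triple, here requesting that
 * the backing rings on NUMA node 0 be created rather than looked up
 * (ATTACH).
 */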
static int
rte_pmd_ring_devinit(const char *name, const char *params)
{
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;
	struct node_action_list *info = NULL;

	RTE_LOG(INFO, PMD, "Initializing pmd_ring for %s\n", name);

	if (params == NULL || params[0] == '\0') {
		ret = eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE);
		if (ret == -1) {
			RTE_LOG(INFO, PMD,
				"Attach to pmd_ring for %s\n", name);
			ret = eth_dev_ring_create(name, rte_socket_id(),
					DEV_ATTACH);
		}
	} else {
		kvlist = rte_kvargs_parse(params, valid_arguments);

		if (!kvlist) {
			RTE_LOG(INFO, PMD, "Ignoring unsupported parameters when creating"
					" rings-backed ethernet device\n");
			ret = eth_dev_ring_create(name, rte_socket_id(),
					DEV_CREATE);
			if (ret == -1) {
				RTE_LOG(INFO, PMD,
					"Attach to pmd_ring for %s\n",
					name);
				ret = eth_dev_ring_create(name, rte_socket_id(),
						DEV_ATTACH);
			}
			return ret;
		}
		ret = rte_kvargs_count(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG);
		info = rte_zmalloc("struct node_action_list",
				sizeof(struct node_action_list) +
				(sizeof(struct node_action_pair) * ret), 0);
		if (info == NULL)
			goto out_free;

		info->total = ret;
		info->list = (struct node_action_pair *)(info + 1);

		ret = rte_kvargs_process(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG,
				parse_kvlist, info);
		if (ret < 0)
			goto out_free;

		for (info->count = 0; info->count < info->total; info->count++) {
			ret = eth_dev_ring_create(name,
					info->list[info->count].node,
					info->list[info->count].action);
			if ((ret == -1) &&
			    (info->list[info->count].action == DEV_CREATE)) {
				RTE_LOG(INFO, PMD,
					"Attach to pmd_ring for %s\n",
					name);
				ret = eth_dev_ring_create(name,
						info->list[info->count].node,
						DEV_ATTACH);
			}
		}
	}

out_free:
	rte_kvargs_free(kvlist);
	rte_free(info);
	return ret;
}
static int
rte_pmd_ring_devuninit(const char *name)
{
	struct rte_eth_dev *eth_dev = NULL;

	RTE_LOG(INFO, PMD, "Un-Initializing pmd_ring for %s\n", name);

	if (name == NULL)
		return -EINVAL;

	/* find an ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -ENODEV;

	eth_dev_stop(eth_dev);
	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);
	rte_free(eth_dev->pci_dev);

	rte_eth_dev_release_port(eth_dev);
	return 0;
}
static struct rte_driver pmd_ring_drv = {
	.name = "eth_ring",
	.type = PMD_VDEV,
	.init = rte_pmd_ring_devinit,
	.uninit = rte_pmd_ring_devuninit,
};

PMD_REGISTER_DRIVER(pmd_ring_drv);