634da63211d30e79fece7f959813131face6b1f8
[dpdk.git] / drivers / net / ring / rte_eth_ring.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2015 Intel Corporation
3  */
4
5 #include "rte_eth_ring.h"
6 #include <rte_mbuf.h>
7 #include <rte_ethdev_driver.h>
8 #include <rte_malloc.h>
9 #include <rte_memcpy.h>
10 #include <rte_string_fns.h>
11 #include <rte_bus_vdev.h>
12 #include <rte_kvargs.h>
13 #include <rte_errno.h>
14
/* kvargs keys accepted on the vdev command line */
#define ETH_RING_NUMA_NODE_ACTION_ARG   "nodeaction"
#define ETH_RING_ACTION_CREATE          "CREATE"
#define ETH_RING_ACTION_ATTACH          "ATTACH"
/* internal-only key: carries a pointer from rte_eth_from_rings() */
#define ETH_RING_INTERNAL_ARG           "internal"

/* NULL-terminated list handed to rte_kvargs_parse() */
static const char *valid_arguments[] = {
        ETH_RING_NUMA_NODE_ACTION_ARG,
        ETH_RING_INTERNAL_ARG,
        NULL
};
25
/*
 * Argument bundle passed from rte_eth_from_rings() to the vdev probe
 * path: the kvargs string carries only a "%p" pointer to this struct,
 * which lives on the caller's stack for the duration of the probe.
 */
struct ring_internal_args {
        struct rte_ring * const *rx_queues;  /* caller-owned RX rings */
        const unsigned int nb_rx_queues;     /* number of RX rings */
        struct rte_ring * const *tx_queues;  /* caller-owned TX rings */
        const unsigned int nb_tx_queues;     /* number of TX rings */
        const unsigned int numa_node;        /* socket for allocations */
        void *addr; /* self addr for sanity check */
};
34
/* Whether probe creates fresh rings or attaches to existing ones. */
enum dev_action {
        DEV_CREATE,
        DEV_ATTACH
};

/* Per-queue state: the backing ring plus soft packet counters. */
struct ring_queue {
        struct rte_ring *rng;
        rte_atomic64_t rx_pkts;  /* packets dequeued on this queue */
        rte_atomic64_t tx_pkts;  /* packets enqueued on this queue */
};
45
/* Per-device private data; zero-initialized at allocation. */
struct pmd_internals {
        unsigned int max_rx_queues;  /* reported via dev_infos_get */
        unsigned int max_tx_queues;

        struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
        struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];

        /* backing storage for data->mac_addrs (not a standalone alloc) */
        struct rte_ether_addr address;
        enum dev_action action;  /* how the rings were obtained */
};
56
/* Link state reported to the ethdev layer: fixed 10G full-duplex,
 * initially down until dev_start()/set_link_up() raises it.
 */
static struct rte_eth_link pmd_link = {
        .link_speed = ETH_SPEED_NUM_10G,
        .link_duplex = ETH_LINK_FULL_DUPLEX,
        .link_status = ETH_LINK_DOWN,
        .link_autoneg = ETH_LINK_FIXED,
};

/* Dynamic log type, registered in eth_ring_init_log(). */
static int eth_ring_logtype;

/* Driver-local logging helper; prefixes the calling function's name. */
#define PMD_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, eth_ring_logtype, \
                "%s(): " fmt "\n", __func__, ##args)
69
70 static uint16_t
71 eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
72 {
73         void **ptrs = (void *)&bufs[0];
74         struct ring_queue *r = q;
75         const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
76                         ptrs, nb_bufs, NULL);
77         if (r->rng->flags & RING_F_SC_DEQ)
78                 r->rx_pkts.cnt += nb_rx;
79         else
80                 rte_atomic64_add(&(r->rx_pkts), nb_rx);
81         return nb_rx;
82 }
83
84 static uint16_t
85 eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
86 {
87         void **ptrs = (void *)&bufs[0];
88         struct ring_queue *r = q;
89         const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
90                         ptrs, nb_bufs, NULL);
91         if (r->rng->flags & RING_F_SP_ENQ)
92                 r->tx_pkts.cnt += nb_tx;
93         else
94                 rte_atomic64_add(&(r->tx_pkts), nb_tx);
95         return nb_tx;
96 }
97
98 static int
99 eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }
100
101 static int
102 eth_dev_start(struct rte_eth_dev *dev)
103 {
104         dev->data->dev_link.link_status = ETH_LINK_UP;
105         return 0;
106 }
107
108 static void
109 eth_dev_stop(struct rte_eth_dev *dev)
110 {
111         dev->data->dev_link.link_status = ETH_LINK_DOWN;
112 }
113
114 static int
115 eth_dev_set_link_down(struct rte_eth_dev *dev)
116 {
117         dev->data->dev_link.link_status = ETH_LINK_DOWN;
118         return 0;
119 }
120
121 static int
122 eth_dev_set_link_up(struct rte_eth_dev *dev)
123 {
124         dev->data->dev_link.link_status = ETH_LINK_UP;
125         return 0;
126 }
127
128 static int
129 eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
130                                     uint16_t nb_rx_desc __rte_unused,
131                                     unsigned int socket_id __rte_unused,
132                                     const struct rte_eth_rxconf *rx_conf __rte_unused,
133                                     struct rte_mempool *mb_pool __rte_unused)
134 {
135         struct pmd_internals *internals = dev->data->dev_private;
136
137         dev->data->rx_queues[rx_queue_id] = &internals->rx_ring_queues[rx_queue_id];
138         return 0;
139 }
140
141 static int
142 eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
143                                     uint16_t nb_tx_desc __rte_unused,
144                                     unsigned int socket_id __rte_unused,
145                                     const struct rte_eth_txconf *tx_conf __rte_unused)
146 {
147         struct pmd_internals *internals = dev->data->dev_private;
148
149         dev->data->tx_queues[tx_queue_id] = &internals->tx_ring_queues[tx_queue_id];
150         return 0;
151 }
152
153
154 static void
155 eth_dev_info(struct rte_eth_dev *dev,
156              struct rte_eth_dev_info *dev_info)
157 {
158         struct pmd_internals *internals = dev->data->dev_private;
159
160         dev_info->max_mac_addrs = 1;
161         dev_info->max_rx_pktlen = (uint32_t)-1;
162         dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
163         dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
164         dev_info->min_rx_bufsize = 0;
165 }
166
167 static int
168 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
169 {
170         unsigned int i;
171         unsigned long rx_total = 0, tx_total = 0;
172         const struct pmd_internals *internal = dev->data->dev_private;
173
174         for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
175                         i < dev->data->nb_rx_queues; i++) {
176                 stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
177                 rx_total += stats->q_ipackets[i];
178         }
179
180         for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
181                         i < dev->data->nb_tx_queues; i++) {
182                 stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
183                 tx_total += stats->q_opackets[i];
184         }
185
186         stats->ipackets = rx_total;
187         stats->opackets = tx_total;
188
189         return 0;
190 }
191
192 static void
193 eth_stats_reset(struct rte_eth_dev *dev)
194 {
195         unsigned int i;
196         struct pmd_internals *internal = dev->data->dev_private;
197
198         for (i = 0; i < dev->data->nb_rx_queues; i++)
199                 internal->rx_ring_queues[i].rx_pkts.cnt = 0;
200         for (i = 0; i < dev->data->nb_tx_queues; i++)
201                 internal->tx_ring_queues[i].tx_pkts.cnt = 0;
202 }
203
204 static void
205 eth_mac_addr_remove(struct rte_eth_dev *dev __rte_unused,
206         uint32_t index __rte_unused)
207 {
208 }
209
210 static int
211 eth_mac_addr_add(struct rte_eth_dev *dev __rte_unused,
212         struct rte_ether_addr *mac_addr __rte_unused,
213         uint32_t index __rte_unused,
214         uint32_t vmdq __rte_unused)
215 {
216         return 0;
217 }
218
219 static void
220 eth_queue_release(void *q __rte_unused) { ; }
221 static int
222 eth_link_update(struct rte_eth_dev *dev __rte_unused,
223                 int wait_to_complete __rte_unused) { return 0; }
224
/* ethdev operations table wired into every ring device. */
static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_set_link_up = eth_dev_set_link_up,
        .dev_set_link_down = eth_dev_set_link_down,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .link_update = eth_link_update,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
        .mac_addr_remove = eth_mac_addr_remove,
        .mac_addr_add = eth_mac_addr_add,
};
242
/*
 * Core device creation: allocate an ethdev named @name whose queues are
 * backed by the caller-supplied rings.
 *
 * NOTE(review): callers must guarantee nb_rx_queues/nb_tx_queues do not
 * exceed RTE_PMD_RING_MAX_RX_RINGS/RTE_PMD_RING_MAX_TX_RINGS — the
 * copy loops below write into fixed-size arrays in pmd_internals.
 *
 * Returns the new port id on success; on failure returns -1 with
 * rte_errno set (ENOMEM for allocation failures, ENOSPC when no ethdev
 * slot is free) after freeing everything allocated here.
 */
static int
do_eth_dev_ring_create(const char *name,
                struct rte_ring * const rx_queues[],
                const unsigned int nb_rx_queues,
                struct rte_ring *const tx_queues[],
                const unsigned int nb_tx_queues,
                const unsigned int numa_node, enum dev_action action,
                struct rte_eth_dev **eth_dev_p)
{
        struct rte_eth_dev_data *data = NULL;
        struct pmd_internals *internals = NULL;
        struct rte_eth_dev *eth_dev = NULL;
        void **rx_queues_local = NULL;
        void **tx_queues_local = NULL;
        unsigned int i;

        PMD_LOG(INFO, "Creating rings-backed ethdev on numa socket %u",
                        numa_node);

        /* queue-pointer arrays are placed on the requested NUMA node */
        rx_queues_local = rte_calloc_socket(name, nb_rx_queues,
                                            sizeof(void *), 0, numa_node);
        if (rx_queues_local == NULL) {
                rte_errno = ENOMEM;
                goto error;
        }

        tx_queues_local = rte_calloc_socket(name, nb_tx_queues,
                                            sizeof(void *), 0, numa_node);
        if (tx_queues_local == NULL) {
                rte_errno = ENOMEM;
                goto error;
        }

        internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
        if (internals == NULL) {
                rte_errno = ENOMEM;
                goto error;
        }

        /* reserve an ethdev entry */
        eth_dev = rte_eth_dev_allocate(name);
        if (eth_dev == NULL) {
                rte_errno = ENOSPC;
                goto error;
        }

        /* now put it all together
         * - store queue data in internals,
         * - store numa_node info in eth_dev_data
         * - point eth_dev_data to internals
         * - and point eth_dev structure to new eth_dev_data structure
         */

        data = eth_dev->data;
        data->rx_queues = rx_queues_local;
        data->tx_queues = tx_queues_local;

        internals->action = action;
        internals->max_rx_queues = nb_rx_queues;
        internals->max_tx_queues = nb_tx_queues;
        for (i = 0; i < nb_rx_queues; i++) {
                internals->rx_ring_queues[i].rng = rx_queues[i];
                data->rx_queues[i] = &internals->rx_ring_queues[i];
        }
        for (i = 0; i < nb_tx_queues; i++) {
                internals->tx_ring_queues[i].rng = tx_queues[i];
                data->tx_queues[i] = &internals->tx_ring_queues[i];
        }

        data->dev_private = internals;
        data->nb_rx_queues = (uint16_t)nb_rx_queues;
        data->nb_tx_queues = (uint16_t)nb_tx_queues;
        data->dev_link = pmd_link;
        /* mac_addrs points into internals — never freed on its own */
        data->mac_addrs = &internals->address;

        eth_dev->dev_ops = &ops;
        data->kdrv = RTE_KDRV_NONE;
        data->numa_node = numa_node;

        /* finally assign rx and tx ops */
        eth_dev->rx_pkt_burst = eth_ring_rx;
        eth_dev->tx_pkt_burst = eth_ring_tx;

        rte_eth_dev_probing_finish(eth_dev);
        *eth_dev_p = eth_dev;

        return data->port_id;

error:
        rte_free(rx_queues_local);
        rte_free(tx_queues_local);
        rte_free(internals);

        return -1;
}
338
339 int
340 rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
341                 const unsigned int nb_rx_queues,
342                 struct rte_ring *const tx_queues[],
343                 const unsigned int nb_tx_queues,
344                 const unsigned int numa_node)
345 {
346         struct ring_internal_args args = {
347                 .rx_queues = rx_queues,
348                 .nb_rx_queues = nb_rx_queues,
349                 .tx_queues = tx_queues,
350                 .nb_tx_queues = nb_tx_queues,
351                 .numa_node = numa_node,
352                 .addr = &args,
353         };
354         char args_str[32];
355         char ring_name[RTE_RING_NAMESIZE];
356         uint16_t port_id = RTE_MAX_ETHPORTS;
357         int ret;
358
359         /* do some parameter checking */
360         if (rx_queues == NULL && nb_rx_queues > 0) {
361                 rte_errno = EINVAL;
362                 return -1;
363         }
364         if (tx_queues == NULL && nb_tx_queues > 0) {
365                 rte_errno = EINVAL;
366                 return -1;
367         }
368         if (nb_rx_queues > RTE_PMD_RING_MAX_RX_RINGS) {
369                 rte_errno = EINVAL;
370                 return -1;
371         }
372
373         snprintf(args_str, sizeof(args_str), "%s=%p",
374                  ETH_RING_INTERNAL_ARG, &args);
375
376         ret = snprintf(ring_name, sizeof(ring_name), "net_ring_%s", name);
377         if (ret >= (int)sizeof(ring_name)) {
378                 rte_errno = ENAMETOOLONG;
379                 return -1;
380         }
381
382         ret = rte_vdev_init(ring_name, args_str);
383         if (ret) {
384                 rte_errno = EINVAL;
385                 return -1;
386         }
387
388         ret = rte_eth_dev_get_port_by_name(ring_name, &port_id);
389         if (ret) {
390                 rte_errno = ENODEV;
391                 return -1;
392         }
393
394         return port_id;
395 }
396
397 int
398 rte_eth_from_ring(struct rte_ring *r)
399 {
400         return rte_eth_from_rings(r->name, &r, 1, &r, 1,
401                         r->memzone ? r->memzone->socket_id : SOCKET_ID_ANY);
402 }
403
404 static int
405 eth_dev_ring_create(const char *name, const unsigned int numa_node,
406                 enum dev_action action, struct rte_eth_dev **eth_dev)
407 {
408         /* rx and tx are so-called from point of view of first port.
409          * They are inverted from the point of view of second port
410          */
411         struct rte_ring *rxtx[RTE_PMD_RING_MAX_RX_RINGS];
412         unsigned int i;
413         char rng_name[RTE_RING_NAMESIZE];
414         unsigned int num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
415                         RTE_PMD_RING_MAX_TX_RINGS);
416
417         for (i = 0; i < num_rings; i++) {
418                 int cc;
419
420                 cc = snprintf(rng_name, sizeof(rng_name),
421                               "ETH_RXTX%u_%s", i, name);
422                 if (cc >= (int)sizeof(rng_name)) {
423                         rte_errno = ENAMETOOLONG;
424                         return -1;
425                 }
426
427                 rxtx[i] = (action == DEV_CREATE) ?
428                                 rte_ring_create(rng_name, 1024, numa_node,
429                                                 RING_F_SP_ENQ|RING_F_SC_DEQ) :
430                                 rte_ring_lookup(rng_name);
431                 if (rxtx[i] == NULL)
432                         return -1;
433         }
434
435         if (do_eth_dev_ring_create(name, rxtx, num_rings, rxtx, num_rings,
436                 numa_node, action, eth_dev) < 0)
437                 return -1;
438
439         return 0;
440 }
441
/* One parsed "name:node:action" triple from the nodeaction kvarg. */
struct node_action_pair {
        char name[PATH_MAX];
        unsigned int node;       /* NUMA node parsed from the kvarg */
        enum dev_action action;  /* DEV_CREATE or DEV_ATTACH */
};

/* Fixed-capacity list of pairs filled in by parse_kvlist(). */
struct node_action_list {
        unsigned int total;  /* capacity of list[] */
        unsigned int count;  /* entries filled so far */
        struct node_action_pair *list;  /* points just past this struct */
};
453
/*
 * kvargs handler for "nodeaction=name:node:action".  Splits the value
 * on ':' into device name, numa node and CREATE/ATTACH action, storing
 * the result in the next free slot of the node_action_list passed via
 * @data.  Returns 0 on success, -EINVAL on any parse failure.
 */
static int parse_kvlist(const char *key __rte_unused,
                        const char *value, void *data)
{
        struct node_action_list *info = data;
        int ret;
        char *name;
        char *action;
        char *node;
        char *end;

        /* work on a private copy: NUL separators are written in place */
        name = strdup(value);

        ret = -EINVAL;

        if (!name) {
                PMD_LOG(WARNING, "command line parameter is empty for ring pmd!");
                goto out;
        }

        /* split off "<node>:<action>" after the first ':' */
        node = strchr(name, ':');
        if (!node) {
                PMD_LOG(WARNING, "could not parse node value from %s",
                        name);
                goto out;
        }

        *node = '\0';
        node++;

        /* split "<action>" after the second ':' */
        action = strchr(node, ':');
        if (!action) {
                PMD_LOG(WARNING, "could not parse action value from %s",
                        node);
                goto out;
        }

        *action = '\0';
        action++;

        /*
         * Need to do some sanity checking here
         */

        if (strcmp(action, ETH_RING_ACTION_ATTACH) == 0)
                info->list[info->count].action = DEV_ATTACH;
        else if (strcmp(action, ETH_RING_ACTION_CREATE) == 0)
                info->list[info->count].action = DEV_CREATE;
        else
                goto out;

        /* errno must be cleared before strtol to detect ERANGE reliably */
        errno = 0;
        info->list[info->count].node = strtol(node, &end, 10);

        if ((errno != 0) || (*end != '\0')) {
                PMD_LOG(WARNING,
                        "node value %s is unparseable as a number", node);
                goto out;
        }

        strlcpy(info->list[info->count].name, name,
                sizeof(info->list[info->count].name));

        info->count++;

        ret = 0;
out:
        free(name);
        return ret;
}
523
524 static int
525 parse_internal_args(const char *key __rte_unused, const char *value,
526                 void *data)
527 {
528         struct ring_internal_args **internal_args = data;
529         void *args;
530
531         sscanf(value, "%p", &args);
532
533         *internal_args = args;
534
535         if ((*internal_args)->addr != args)
536                 return -1;
537
538         return 0;
539 }
540
541 static int
542 rte_pmd_ring_probe(struct rte_vdev_device *dev)
543 {
544         const char *name, *params;
545         struct rte_kvargs *kvlist = NULL;
546         int ret = 0;
547         struct node_action_list *info = NULL;
548         struct rte_eth_dev *eth_dev = NULL;
549         struct ring_internal_args *internal_args;
550
551         name = rte_vdev_device_name(dev);
552         params = rte_vdev_device_args(dev);
553
554         PMD_LOG(INFO, "Initializing pmd_ring for %s", name);
555
556         if (params == NULL || params[0] == '\0') {
557                 ret = eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE,
558                                 &eth_dev);
559                 if (ret == -1) {
560                         PMD_LOG(INFO,
561                                 "Attach to pmd_ring for %s", name);
562                         ret = eth_dev_ring_create(name, rte_socket_id(),
563                                                   DEV_ATTACH, &eth_dev);
564                 }
565         } else {
566                 kvlist = rte_kvargs_parse(params, valid_arguments);
567
568                 if (!kvlist) {
569                         PMD_LOG(INFO,
570                                 "Ignoring unsupported parameters when creatingrings-backed ethernet device");
571                         ret = eth_dev_ring_create(name, rte_socket_id(),
572                                                   DEV_CREATE, &eth_dev);
573                         if (ret == -1) {
574                                 PMD_LOG(INFO,
575                                         "Attach to pmd_ring for %s",
576                                         name);
577                                 ret = eth_dev_ring_create(name, rte_socket_id(),
578                                                           DEV_ATTACH, &eth_dev);
579                         }
580
581                         if (eth_dev)
582                                 eth_dev->device = &dev->device;
583
584                         return ret;
585                 }
586
587                 if (rte_kvargs_count(kvlist, ETH_RING_INTERNAL_ARG) == 1) {
588                         ret = rte_kvargs_process(kvlist, ETH_RING_INTERNAL_ARG,
589                                                  parse_internal_args,
590                                                  &internal_args);
591                         if (ret < 0)
592                                 goto out_free;
593
594                         ret = do_eth_dev_ring_create(name,
595                                 internal_args->rx_queues,
596                                 internal_args->nb_rx_queues,
597                                 internal_args->tx_queues,
598                                 internal_args->nb_tx_queues,
599                                 internal_args->numa_node,
600                                 DEV_ATTACH,
601                                 &eth_dev);
602                         if (ret >= 0)
603                                 ret = 0;
604                 } else {
605                         ret = rte_kvargs_count(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG);
606                         info = rte_zmalloc("struct node_action_list",
607                                            sizeof(struct node_action_list) +
608                                            (sizeof(struct node_action_pair) * ret),
609                                            0);
610                         if (!info)
611                                 goto out_free;
612
613                         info->total = ret;
614                         info->list = (struct node_action_pair *)(info + 1);
615
616                         ret = rte_kvargs_process(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG,
617                                                  parse_kvlist, info);
618
619                         if (ret < 0)
620                                 goto out_free;
621
622                         for (info->count = 0; info->count < info->total; info->count++) {
623                                 ret = eth_dev_ring_create(info->list[info->count].name,
624                                                           info->list[info->count].node,
625                                                           info->list[info->count].action,
626                                                           &eth_dev);
627                                 if ((ret == -1) &&
628                                     (info->list[info->count].action == DEV_CREATE)) {
629                                         PMD_LOG(INFO,
630                                                 "Attach to pmd_ring for %s",
631                                                 name);
632                                         ret = eth_dev_ring_create(name,
633                                                         info->list[info->count].node,
634                                                         DEV_ATTACH,
635                                                         &eth_dev);
636                                 }
637                         }
638                 }
639         }
640
641         if (eth_dev)
642                 eth_dev->device = &dev->device;
643
644 out_free:
645         rte_kvargs_free(kvlist);
646         rte_free(info);
647         return ret;
648 }
649
/*
 * vdev remove entry point: stop the device, free the rings this driver
 * created (DEV_CREATE only — attached rings belong to their creator),
 * and release the ethdev port.  Returns 0, -EINVAL or -ENODEV.
 */
static int
rte_pmd_ring_remove(struct rte_vdev_device *dev)
{
        const char *name = rte_vdev_device_name(dev);
        struct rte_eth_dev *eth_dev = NULL;
        struct pmd_internals *internals = NULL;
        struct ring_queue *r = NULL;
        uint16_t i;

        PMD_LOG(INFO, "Un-Initializing pmd_ring for %s", name);

        if (name == NULL)
                return -EINVAL;

        /* find an ethdev entry */
        eth_dev = rte_eth_dev_allocated(name);
        if (eth_dev == NULL)
                return -ENODEV;

        eth_dev_stop(eth_dev);

        internals = eth_dev->data->dev_private;
        if (internals->action == DEV_CREATE) {
                /*
                 * it is only necessary to delete the rings in rx_queues because
                 * they are the same used in tx_queues
                 */
                for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                        r = eth_dev->data->rx_queues[i];
                        rte_ring_free(r->rng);
                }
        }

        /* mac_addrs must not be freed alone because part of dev_private */
        eth_dev->data->mac_addrs = NULL;
        rte_eth_dev_release_port(eth_dev);
        return 0;
}
688
/* Virtual-device driver hooks for the ring PMD. */
static struct rte_vdev_driver pmd_ring_drv = {
        .probe = rte_pmd_ring_probe,
        .remove = rte_pmd_ring_remove,
};

RTE_PMD_REGISTER_VDEV(net_ring, pmd_ring_drv);
/* legacy name kept for backward compatibility */
RTE_PMD_REGISTER_ALIAS(net_ring, eth_ring);
RTE_PMD_REGISTER_PARAM_STRING(net_ring,
        ETH_RING_NUMA_NODE_ACTION_ARG "=name:node:action(ATTACH|CREATE)");
698
/* Constructor: register the driver's log type, default level NOTICE. */
RTE_INIT(eth_ring_init_log)
{
        eth_ring_logtype = rte_log_register("pmd.net.ring");
        if (eth_ring_logtype >= 0)
                rte_log_set_level(eth_ring_logtype, RTE_LOG_NOTICE);
}