app/test-eventdev/test_pipeline_common.c
/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 Cavium, Inc.
 */

#include "test_pipeline_common.h"

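/*
 * Service callback used when the ethdev Tx path is not multi-thread safe:
 * dequeue a burst of events from the Tx event port and buffer each mbuf on
 * its destination port's Tx buffer; on an empty dequeue, flush all buffers.
 */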
static int32_t
pipeline_event_tx_burst_service_func(void *args)
{
        int i;
        struct tx_service_data *tx = args;
        const uint8_t dev = tx->dev_id;
        const uint8_t port = tx->port_id;
        struct rte_event ev[BURST_SIZE + 1];

        uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);

        if (!nb_rx) {
                for (i = 0; i < tx->nb_ethports; i++)
                        rte_eth_tx_buffer_flush(i, 0, tx->tx_buf[i]);
                return 0;
        }

        for (i = 0; i < nb_rx; i++) {
                struct rte_mbuf *m = ev[i].mbuf;
                rte_eth_tx_buffer(m->port, 0, tx->tx_buf[m->port], m);
        }
        tx->processed_pkts += nb_rx;

        return 0;
}

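/*
 * Single-event variant of the Tx service callback, used when the event
 * device does not support burst mode.
 */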
static int32_t
pipeline_event_tx_service_func(void *args)
{
        int i;
        struct tx_service_data *tx = args;
        const uint8_t dev = tx->dev_id;
        const uint8_t port = tx->port_id;
        struct rte_event ev;

        uint16_t nb_rx = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

        if (!nb_rx) {
                for (i = 0; i < tx->nb_ethports; i++)
                        rte_eth_tx_buffer_flush(i, 0, tx->tx_buf[i]);
                return 0;
        }

        struct rte_mbuf *m = ev.mbuf;
        rte_eth_tx_buffer(m->port, 0, tx->tx_buf[m->port], m);
        tx->processed_pkts++;

        return 0;
}

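/* Print the per-worker packet distribution and return the test verdict. */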
int
pipeline_test_result(struct evt_test *test, struct evt_options *opt)
{
        RTE_SET_USED(opt);
        int i;
        uint64_t total = 0;
        struct test_pipeline *t = evt_test_priv(test);

        printf("Packet distribution across worker cores:\n");
        for (i = 0; i < t->nb_workers; i++)
                total += t->worker[i].processed_pkts;
        for (i = 0; i < t->nb_workers; i++)
                printf("Worker %d packets: "CLGRN"%"PRIu64" "CLNRM"percentage:"
                                CLGRN" %3.2f\n"CLNRM, i,
                                t->worker[i].processed_pkts,
                                (((double)t->worker[i].processed_pkts)/total)
                                * 100);
        return t->result;
}

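/* Dump the test configuration derived from the command-line options. */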
void
pipeline_opt_dump(struct evt_options *opt, uint8_t nb_queues)
{
        evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
        evt_dump_worker_lcores(opt);
        evt_dump_nb_stages(opt);
        evt_dump("nb_evdev_ports", "%d", pipeline_nb_event_ports(opt));
        evt_dump("nb_evdev_queues", "%d", nb_queues);
        evt_dump_queue_priority(opt);
        evt_dump_sched_type_list(opt);
        evt_dump_producer_type(opt);
}

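/*
 * Total packets processed so far: read from the Tx service counter when the
 * ethdev Tx path is multi-thread unsafe, otherwise summed across workers.
 */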
static inline uint64_t
processed_pkts(struct test_pipeline *t)
{
        uint8_t i;
        uint64_t total = 0;

        rte_smp_rmb();
        if (t->mt_unsafe)
                total = t->tx_service.processed_pkts;
        else
                for (i = 0; i < t->nb_workers; i++)
                        total += t->worker[i].processed_pkts;

        return total;
}

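/*
 * Launch one worker per enabled lcore, then poll from the master lcore,
 * printing current and average throughput roughly once per second until
 * the test signals completion.
 */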
int
pipeline_launch_lcores(struct evt_test *test, struct evt_options *opt,
                int (*worker)(void *))
{
        int ret, lcore_id;
        struct test_pipeline *t = evt_test_priv(test);

        int port_idx = 0;
        /* launch workers */
        RTE_LCORE_FOREACH_SLAVE(lcore_id) {
                if (!(opt->wlcores[lcore_id]))
                        continue;

                ret = rte_eal_remote_launch(worker,
                                 &t->worker[port_idx], lcore_id);
                if (ret) {
                        evt_err("failed to launch worker %d", lcore_id);
                        return ret;
                }
                port_idx++;
        }

        uint64_t perf_cycles = rte_get_timer_cycles();
        const uint64_t perf_sample = rte_get_timer_hz();

        static float total_mpps;
        static uint64_t samples;

        uint64_t prev_pkts = 0;

        while (t->done == false) {
                const uint64_t new_cycles = rte_get_timer_cycles();

                if ((new_cycles - perf_cycles) > perf_sample) {
                        const uint64_t curr_pkts = processed_pkts(t);

                        float mpps = (float)(curr_pkts - prev_pkts)/1000000;

                        prev_pkts = curr_pkts;
                        perf_cycles = new_cycles;
                        total_mpps += mpps;
                        ++samples;
                        printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
                                        mpps, total_mpps/samples);
                        fflush(stdout);
                }
        }
        printf("\n");
        return 0;
}

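/* Validate the lcore layout and queue/port counts before setup. */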
int
pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues)
{
        unsigned int lcores;
        /*
         * Minimum lcores needed: N workers + 1 master.
         */
        lcores = 2;

        if (!rte_eth_dev_count_avail()) {
                evt_err("test needs minimum 1 ethernet dev");
                return -1;
        }

        if (rte_lcore_count() < lcores) {
                evt_err("test needs minimum %d lcores", lcores);
                return -1;
        }

        /* Validate worker lcores */
        if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
                evt_err("worker lcores overlap with master lcore");
                return -1;
        }
        if (evt_has_disabled_lcore(opt->wlcores)) {
                evt_err("one or more worker lcores are not enabled");
                return -1;
        }
        if (!evt_has_active_lcore(opt->wlcores)) {
                evt_err("minimum one worker is required");
                return -1;
        }

        if (nb_queues > EVT_MAX_QUEUES) {
                evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
                return -1;
        }
        if (pipeline_nb_event_ports(opt) > EVT_MAX_PORTS) {
                evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
                return -1;
        }

        if (evt_has_invalid_stage(opt))
                return -1;

        if (evt_has_invalid_sched_type(opt))
                return -1;

        return 0;
}

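/*
 * Configure every available ethdev with one RSS Rx queue and one Tx queue,
 * allocate per-port Tx buffers and record whether any port lacks lockfree
 * multi-thread Tx (which forces the Tx service path).
 */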
#define NB_RX_DESC                      128
#define NB_TX_DESC                      512
int
pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
{
        uint16_t i;
        uint8_t nb_queues = 1;
        uint8_t mt_state = 0;
        struct test_pipeline *t = evt_test_priv(test);
        struct rte_eth_rxconf rx_conf;
        struct rte_eth_conf port_conf = {
                .rxmode = {
                        .mq_mode = ETH_MQ_RX_RSS,
                        .max_rx_pkt_len = ETHER_MAX_LEN,
                        .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
                },
                .rx_adv_conf = {
                        .rss_conf = {
                                .rss_key = NULL,
                                .rss_hf = ETH_RSS_IP,
                        },
                },
        };

        RTE_SET_USED(opt);
        if (!rte_eth_dev_count_avail()) {
                evt_err("No ethernet ports found.\n");
                return -ENODEV;
        }

        RTE_ETH_FOREACH_DEV(i) {
                struct rte_eth_dev_info dev_info;

                memset(&dev_info, 0, sizeof(struct rte_eth_dev_info));
                rte_eth_dev_info_get(i, &dev_info);
                /* A port without MT_LOCKFREE Tx capability is MT unsafe. */
                mt_state = !(dev_info.tx_offload_capa &
                                DEV_TX_OFFLOAD_MT_LOCKFREE);
                rx_conf = dev_info.default_rxconf;
                rx_conf.offloads = port_conf.rxmode.offloads;

                if (rte_eth_dev_configure(i, nb_queues, nb_queues,
                                &port_conf) < 0) {
                        evt_err("Failed to configure eth port [%d]\n", i);
                        return -EINVAL;
                }

                if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC,
                                rte_socket_id(), &rx_conf, t->pool) < 0) {
                        evt_err("Failed to setup eth port [%d] rx_queue: %d.\n",
                                        i, 0);
                        return -EINVAL;
                }
                if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC,
                                        rte_socket_id(), NULL) < 0) {
                        evt_err("Failed to setup eth port [%d] tx_queue: %d.\n",
                                        i, 0);
                        return -EINVAL;
                }

                /* One MT-unsafe port forces the single Tx service path. */
                t->mt_unsafe |= mt_state;
                t->tx_service.tx_buf[i] =
                        rte_malloc(NULL, RTE_ETH_TX_BUFFER_SIZE(BURST_SIZE), 0);
                if (t->tx_service.tx_buf[i] == NULL)
                        rte_panic("Unable to allocate Tx buffer memory.");
                rte_eth_promiscuous_enable(i);
        }

        return 0;
}

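/*
 * Set up one event port per worker lcore and link it either to all queues
 * (queue_arr == NULL) or to the queues listed in queue_arr.
 */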
int
pipeline_event_port_setup(struct evt_test *test, struct evt_options *opt,
                uint8_t *queue_arr, uint8_t nb_queues,
                const struct rte_event_port_conf p_conf)
{
        int i;
        int ret;
        uint8_t port;
        struct test_pipeline *t = evt_test_priv(test);

        /* setup one port per worker, linking to all queues */
        for (port = 0; port < evt_nr_active_lcores(opt->wlcores); port++) {
                struct worker_data *w = &t->worker[port];

                w->dev_id = opt->dev_id;
                w->port_id = port;
                w->t = t;
                w->processed_pkts = 0;

                ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
                if (ret) {
                        evt_err("failed to setup port %d", port);
                        return ret;
                }

                if (queue_arr == NULL) {
                        if (rte_event_port_link(opt->dev_id, port, NULL, NULL,
                                                0) != nb_queues)
                                goto link_fail;
                } else {
                        for (i = 0; i < nb_queues; i++) {
                                if (rte_event_port_link(opt->dev_id, port,
                                                &queue_arr[i], NULL, 1) != 1)
                                        goto link_fail;
                        }
                }
        }

        return 0;

link_fail:
        evt_err("failed to link all queues to port %d", port);
        return -EINVAL;
}

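/*
 * Create one Rx adapter per ethdev, map all of its Rx queues to the event
 * queue at prod * stride, fall back to a service core when the adapter has
 * no internal port, then start the ethdev and the adapter.
 */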
int
pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
                struct rte_event_port_conf prod_conf)
{
        int ret = 0;
        uint16_t prod;
        struct rte_event_eth_rx_adapter_queue_conf queue_conf;

        memset(&queue_conf, 0,
                        sizeof(struct rte_event_eth_rx_adapter_queue_conf));
        queue_conf.ev.sched_type = opt->sched_type_list[0];
        RTE_ETH_FOREACH_DEV(prod) {
                uint32_t cap;

                ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
                                prod, &cap);
                if (ret) {
                        evt_err("failed to get event rx adapter[%d]"
                                        " capabilities", opt->dev_id);
                        return ret;
                }
                queue_conf.ev.queue_id = prod * stride;
                ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
                                &prod_conf);
                if (ret) {
                        evt_err("failed to create rx adapter[%d]", prod);
                        return ret;
                }
                ret = rte_event_eth_rx_adapter_queue_add(prod, prod, -1,
                                &queue_conf);
                if (ret) {
                        evt_err("failed to add rx queues to adapter[%d]", prod);
                        return ret;
                }

                if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
                        uint32_t service_id;

                        rte_event_eth_rx_adapter_service_id_get(prod,
                                        &service_id);
                        ret = evt_service_setup(service_id);
                        if (ret) {
                                evt_err("failed to setup service core"
                                                " for Rx adapter");
                                return ret;
                        }
                }

                ret = rte_eth_dev_start(prod);
                if (ret) {
                        evt_err("Ethernet dev [%d] failed to start", prod);
                        return ret;
                }

                ret = rte_event_eth_rx_adapter_start(prod);
                if (ret) {
                        evt_err("Rx adapter[%d] start failed", prod);
                        return ret;
                }
                printf("%s: Port[%d] using Rx adapter[%d] started\n", __func__,
                                prod, prod);
        }

        return ret;
}

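/*
 * Register the Tx path as a service: set up and link the Tx event port,
 * pick the burst or single-event callback based on the event device
 * capabilities, and mark the service runnable.
 */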
int
pipeline_event_tx_service_setup(struct evt_test *test, struct evt_options *opt,
                uint8_t tx_queue_id, uint8_t tx_port_id,
                const struct rte_event_port_conf p_conf)
{
        int ret;
        struct rte_service_spec serv;
        struct test_pipeline *t = evt_test_priv(test);
        struct tx_service_data *tx = &t->tx_service;

        ret = rte_event_port_setup(opt->dev_id, tx_port_id, &p_conf);
        if (ret) {
                evt_err("failed to setup port %d", tx_port_id);
                return ret;
        }

        if (rte_event_port_link(opt->dev_id, tx_port_id, &tx_queue_id,
                                NULL, 1) != 1) {
                evt_err("failed to link queues to port %d", tx_port_id);
                return -EINVAL;
        }

        tx->dev_id = opt->dev_id;
        tx->queue_id = tx_queue_id;
        tx->port_id = tx_port_id;
        tx->nb_ethports = rte_eth_dev_count_avail();
        tx->t = t;

        /* Register Tx service */
        memset(&serv, 0, sizeof(struct rte_service_spec));
        snprintf(serv.name, sizeof(serv.name), "Tx_service");

        if (evt_has_burst_mode(opt->dev_id))
                serv.callback = pipeline_event_tx_burst_service_func;
        else
                serv.callback = pipeline_event_tx_service_func;

        serv.callback_userdata = (void *)tx;
        ret = rte_service_component_register(&serv, &tx->service_id);
        if (ret) {
                evt_err("failed to register Tx service");
                return ret;
        }

        ret = evt_service_setup(tx->service_id);
        if (ret) {
                evt_err("failed to setup service core for Tx service");
                return ret;
        }

        rte_service_runstate_set(tx->service_id, 1);

        return 0;
}

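/* Stop the Tx service (if registered) and stop/close all ethdevs. */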
void
pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
{
        uint16_t i;
        struct test_pipeline *t = evt_test_priv(test);

        RTE_SET_USED(opt);

        if (t->mt_unsafe) {
                rte_service_component_runstate_set(t->tx_service.service_id, 0);
                rte_service_runstate_set(t->tx_service.service_id, 0);
                rte_service_component_unregister(t->tx_service.service_id);
        }

        RTE_ETH_FOREACH_DEV(i) {
                rte_event_eth_rx_adapter_stop(i);
                rte_eth_dev_stop(i);
                rte_eth_dev_close(i);
        }
}

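/* Stop and close the event device. */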
void
pipeline_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
        RTE_SET_USED(test);

        rte_event_dev_stop(opt->dev_id);
        rte_event_dev_close(opt->dev_id);
}

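/* Create the packet mbuf pool backing the ethdev Rx queues. */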
int
pipeline_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
        struct test_pipeline *t = evt_test_priv(test);

        t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */
                        opt->pool_sz, /* number of elements */
                        512, /* cache size */
                        0, /* private data size */
                        RTE_MBUF_DEFAULT_BUF_SIZE, /* data room size */
                        opt->socket_id); /* socket id */

        if (t->pool == NULL) {
                evt_err("failed to create mempool");
                return -ENOMEM;
        }

        return 0;
}

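/* Free the packet mbuf pool. */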
void
pipeline_mempool_destroy(struct evt_test *test, struct evt_options *opt)
{
        RTE_SET_USED(opt);
        struct test_pipeline *t = evt_test_priv(test);

        rte_mempool_free(t->pool);
}

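/*
 * Allocate and initialize the per-test private data; the pipeline test
 * always uses the ethdev Rx adapter as its producer.
 */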
int
pipeline_test_setup(struct evt_test *test, struct evt_options *opt)
{
        void *test_pipeline;

        test_pipeline = rte_zmalloc_socket(test->name,
                        sizeof(struct test_pipeline), RTE_CACHE_LINE_SIZE,
                        opt->socket_id);
        if (test_pipeline == NULL) {
                evt_err("failed to allocate test_pipeline memory");
                goto nomem;
        }
        test->test_priv = test_pipeline;

        struct test_pipeline *t = evt_test_priv(test);

        t->nb_workers = evt_nr_active_lcores(opt->wlcores);
        t->outstand_pkts = opt->nb_pkts * evt_nr_active_lcores(opt->wlcores);
        t->done = false;
        t->nb_flows = opt->nb_flows;
        t->result = EVT_TEST_FAILED;
        t->opt = opt;
        opt->prod_type = EVT_PROD_TYPE_ETH_RX_ADPTR;
        memcpy(t->sched_type_list, opt->sched_type_list,
                        sizeof(opt->sched_type_list));
        return 0;
nomem:
        return -ENOMEM;
}

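/* Release the per-test private data. */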
void
pipeline_test_destroy(struct evt_test *test, struct evt_options *opt)
{
        RTE_SET_USED(opt);

        rte_free(test->test_priv);
}