1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
8 #include <netinet/in.h>
9 #ifdef RTE_EXEC_ENV_LINUXAPP
11 #include <linux/if_tun.h>
14 #include <sys/ioctl.h>
17 #include <rte_cycles.h>
18 #include <rte_ethdev.h>
19 #include <rte_ether.h>
22 #include <rte_malloc.h>
23 #include <rte_bus_pci.h>
27 #include "pipeline_common_fe.h"
28 #include "pipeline_master.h"
29 #include "thread_fe.h"
31 #define APP_NAME_SIZE 32
33 #define APP_RETA_SIZE_MAX (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
/* Build the (NUMA node, physical core, hyper-thread) -> lcore id map used
 * throughout initialization; panics on failure. */
36 app_init_core_map(struct app_params *app)
38 APP_LOG(app, HIGH, "Initializing CPU core map ...");
39 app->core_map = cpu_core_map_init(RTE_MAX_NUMA_NODES, RTE_MAX_LCORE,
42 if (app->core_map == NULL)
43 rte_panic("Cannot create CPU core map\n");
/* Dump the full map only at the most verbose application log level. */
45 if (app->log_level >= APP_LOG_LEVEL_LOW)
46 cpu_core_map_print(app->core_map);
49 /* Core Mask String in Hex Representation */
50 #define APP_CORE_MASK_STRING_SIZE ((64 * APP_CORE_MASK_SIZE) / 8 * 2 + 1)
/* Enable in the application core mask the lcore of every configured
 * pipeline, then log the resulting mask as a hex string. */
53 app_init_core_mask(struct app_params *app)
56 char core_mask_str[APP_CORE_MASK_STRING_SIZE];
58 for (i = 0; i < app->n_pipelines; i++) {
59 struct app_pipeline_params *p = &app->pipeline_params[i];
/* Resolve the pipeline's (socket, core, ht) triple to a physical lcore;
 * a negative result means the triple does not exist on this machine. */
62 lcore_id = cpu_core_map_get_lcore_id(app->core_map,
68 rte_panic("Cannot create CPU core mask\n");
70 app_core_enable_in_core_mask(app, lcore_id);
73 app_core_build_core_mask_string(app, core_mask_str);
74 APP_LOG(app, HIGH, "CPU core mask = 0x%s", core_mask_str);
/* Translate the parsed application EAL parameters (app->eal_params) into a
 * synthetic argv vector and invoke rte_eal_init() with it.  Each optional
 * parameter is appended only when its *_present flag (or non-NULL pointer)
 * says it was configured.  Panics if EAL initialization fails. */
78 app_init_eal(struct app_params *app)
81 char core_mask_str[APP_CORE_MASK_STRING_SIZE];
82 struct app_eal_params *p = &app->eal_params;
/* argv[0] is conventionally the program name. */
87 app->eal_argv[n_args++] = strdup(app->app_name);
/* -c <mask>: coremask derived from the pipeline-to-lcore mapping. */
89 app_core_build_core_mask_string(app, core_mask_str);
90 snprintf(buffer, sizeof(buffer), "-c%s", core_mask_str);
91 app->eal_argv[n_args++] = strdup(buffer);
94 snprintf(buffer, sizeof(buffer), "--lcores=%s", p->coremap);
95 app->eal_argv[n_args++] = strdup(buffer);
98 if (p->master_lcore_present) {
101 "--master-lcore=%" PRIu32,
103 app->eal_argv[n_args++] = strdup(buffer);
/* -n: number of memory channels. */
106 snprintf(buffer, sizeof(buffer), "-n%" PRIu32, p->channels);
107 app->eal_argv[n_args++] = strdup(buffer);
109 if (p->memory_present) {
110 snprintf(buffer, sizeof(buffer), "-m%" PRIu32, p->memory);
111 app->eal_argv[n_args++] = strdup(buffer);
114 if (p->ranks_present) {
115 snprintf(buffer, sizeof(buffer), "-r%" PRIu32, p->ranks);
116 app->eal_argv[n_args++] = strdup(buffer);
/* One --pci-blacklist option per configured blacklisted device. */
119 for (i = 0; i < APP_MAX_LINKS; i++) {
120 if (p->pci_blacklist[i] == NULL)
125 "--pci-blacklist=%s",
126 p->pci_blacklist[i]);
127 app->eal_argv[n_args++] = strdup(buffer);
/* Whitelist entries are only emitted when a port mask is in use. */
130 if (app->port_mask != 0)
131 for (i = 0; i < APP_MAX_LINKS; i++) {
132 if (p->pci_whitelist[i] == NULL)
137 "--pci-whitelist=%s",
138 p->pci_whitelist[i]);
139 app->eal_argv[n_args++] = strdup(buffer);
/* Also whitelist the PCI BDF of every configured link. */
142 for (i = 0; i < app->n_links; i++) {
143 char *pci_bdf = app->link_params[i].pci_bdf;
147 "--pci-whitelist=%s",
149 app->eal_argv[n_args++] = strdup(buffer);
152 for (i = 0; i < APP_MAX_LINKS; i++) {
153 if (p->vdev[i] == NULL)
160 app->eal_argv[n_args++] = strdup(buffer);
163 if ((p->vmware_tsc_map_present) && p->vmware_tsc_map) {
164 snprintf(buffer, sizeof(buffer), "--vmware-tsc-map");
165 app->eal_argv[n_args++] = strdup(buffer);
173 app->eal_argv[n_args++] = strdup(buffer);
177 snprintf(buffer, sizeof(buffer), "--syslog=%s", p->syslog);
178 app->eal_argv[n_args++] = strdup(buffer);
181 if (p->log_level_present) {
184 "--log-level=%" PRIu32,
186 app->eal_argv[n_args++] = strdup(buffer);
189 if ((p->version_present) && p->version) {
190 snprintf(buffer, sizeof(buffer), "-v");
191 app->eal_argv[n_args++] = strdup(buffer);
194 if ((p->help_present) && p->help) {
195 snprintf(buffer, sizeof(buffer), "--help");
196 app->eal_argv[n_args++] = strdup(buffer);
199 if ((p->no_huge_present) && p->no_huge) {
200 snprintf(buffer, sizeof(buffer), "--no-huge");
201 app->eal_argv[n_args++] = strdup(buffer);
204 if ((p->no_pci_present) && p->no_pci) {
205 snprintf(buffer, sizeof(buffer), "--no-pci");
206 app->eal_argv[n_args++] = strdup(buffer);
209 if ((p->no_hpet_present) && p->no_hpet) {
210 snprintf(buffer, sizeof(buffer), "--no-hpet");
211 app->eal_argv[n_args++] = strdup(buffer);
214 if ((p->no_shconf_present) && p->no_shconf) {
215 snprintf(buffer, sizeof(buffer), "--no-shconf");
216 app->eal_argv[n_args++] = strdup(buffer);
220 snprintf(buffer, sizeof(buffer), "-d%s", p->add_driver);
221 app->eal_argv[n_args++] = strdup(buffer);
229 app->eal_argv[n_args++] = strdup(buffer);
233 snprintf(buffer, sizeof(buffer), "--huge-dir=%s", p->huge_dir);
234 app->eal_argv[n_args++] = strdup(buffer);
237 if (p->file_prefix) {
242 app->eal_argv[n_args++] = strdup(buffer);
245 if (p->base_virtaddr) {
248 "--base-virtaddr=%s",
250 app->eal_argv[n_args++] = strdup(buffer);
253 if ((p->create_uio_dev_present) && p->create_uio_dev) {
254 snprintf(buffer, sizeof(buffer), "--create-uio-dev");
255 app->eal_argv[n_args++] = strdup(buffer);
263 app->eal_argv[n_args++] = strdup(buffer);
/* "--" terminates the EAL portion of the argument list. */
266 snprintf(buffer, sizeof(buffer), "--");
267 app->eal_argv[n_args++] = strdup(buffer);
269 app->eal_argc = n_args;
271 APP_LOG(app, HIGH, "Initializing EAL ...");
/* At verbose log levels, echo the synthesized arguments (skipping argv[0]). */
272 if (app->log_level >= APP_LOG_LEVEL_LOW) {
275 fprintf(stdout, "[APP] EAL arguments: \"");
276 for (i = 1; i < app->eal_argc; i++)
277 fprintf(stdout, "%s ", app->eal_argv[i]);
278 fprintf(stdout, "\"\n");
281 status = rte_eal_init(app->eal_argc, app->eal_argv);
283 rte_panic("EAL init error\n");
/* Create one rte_mempool of mbufs per configured mempool section;
 * panics if any pool cannot be created. */
287 app_init_mempool(struct app_params *app)
291 for (i = 0; i < app->n_mempools; i++) {
292 struct app_mempool_params *p = &app->mempool_params[i];
294 APP_LOG(app, HIGH, "Initializing %s ...", p->name);
295 app->mempool[i] = rte_pktmbuf_pool_create(
301 sizeof(struct rte_mbuf), /* mbuf data size */
304 if (app->mempool[i] == NULL)
305 rte_panic("%s init error\n", p->name);
/* Install an ethertype filter steering ARP frames to the link's arp_q RX
 * queue.  Returns the rte_eth_dev_filter_ctrl() status code. */
310 app_link_filter_arp_add(struct app_link_params *link)
312 struct rte_eth_ethertype_filter filter = {
313 .ether_type = ETHER_TYPE_ARP,
315 .queue = link->arp_q,
318 return rte_eth_dev_filter_ctrl(link->pmd_id,
319 RTE_ETH_FILTER_ETHERTYPE,
/* Install a SYN filter steering TCP SYN packets to the link's tcp_syn_q
 * RX queue.  Returns the rte_eth_dev_filter_ctrl() status code. */
325 app_link_filter_tcp_syn_add(struct app_link_params *link)
327 struct rte_eth_syn_filter filter = {
329 .queue = link->tcp_syn_q,
332 return rte_eth_dev_filter_ctrl(link->pmd_id,
/* On port l1, add a 5-tuple filter matching only the destination IP of l2
 * (all other fields masked out) and steering hits to l1->ip_local_q.
 * Priority 1 = lowest, so protocol-specific filters take precedence. */
339 app_link_filter_ip_add(struct app_link_params *l1, struct app_link_params *l2)
341 struct rte_eth_ntuple_filter filter = {
342 .flags = RTE_5TUPLE_FLAGS,
343 .dst_ip = rte_bswap32(l2->ip),
344 .dst_ip_mask = UINT32_MAX, /* Enable */
346 .src_ip_mask = 0, /* Disable */
348 .dst_port_mask = 0, /* Disable */
350 .src_port_mask = 0, /* Disable */
352 .proto_mask = 0, /* Disable */
354 .priority = 1, /* Lowest */
355 .queue = l1->ip_local_q,
358 return rte_eth_dev_filter_ctrl(l1->pmd_id,
359 RTE_ETH_FILTER_NTUPLE,
/* Delete the IP-only 5-tuple filter previously added by
 * app_link_filter_ip_add() (filter spec must match exactly). */
365 app_link_filter_ip_del(struct app_link_params *l1, struct app_link_params *l2)
367 struct rte_eth_ntuple_filter filter = {
368 .flags = RTE_5TUPLE_FLAGS,
369 .dst_ip = rte_bswap32(l2->ip),
370 .dst_ip_mask = UINT32_MAX, /* Enable */
372 .src_ip_mask = 0, /* Disable */
374 .dst_port_mask = 0, /* Disable */
376 .src_port_mask = 0, /* Disable */
378 .proto_mask = 0, /* Disable */
380 .priority = 1, /* Lowest */
381 .queue = l1->ip_local_q,
384 return rte_eth_dev_filter_ctrl(l1->pmd_id,
385 RTE_ETH_FILTER_NTUPLE,
386 RTE_ETH_FILTER_DELETE,
/* On port l1, add a 5-tuple filter matching dst IP of l2 AND protocol TCP,
 * steering hits to l1->tcp_local_q.  Priority 2 outranks the plain IP rule. */
391 app_link_filter_tcp_add(struct app_link_params *l1, struct app_link_params *l2)
393 struct rte_eth_ntuple_filter filter = {
394 .flags = RTE_5TUPLE_FLAGS,
395 .dst_ip = rte_bswap32(l2->ip),
396 .dst_ip_mask = UINT32_MAX, /* Enable */
398 .src_ip_mask = 0, /* Disable */
400 .dst_port_mask = 0, /* Disable */
402 .src_port_mask = 0, /* Disable */
403 .proto = IPPROTO_TCP,
404 .proto_mask = UINT8_MAX, /* Enable */
406 .priority = 2, /* Higher priority than IP */
407 .queue = l1->tcp_local_q,
410 return rte_eth_dev_filter_ctrl(l1->pmd_id,
411 RTE_ETH_FILTER_NTUPLE,
/* Delete the TCP 5-tuple filter previously added by
 * app_link_filter_tcp_add() (filter spec must match exactly). */
417 app_link_filter_tcp_del(struct app_link_params *l1, struct app_link_params *l2)
419 struct rte_eth_ntuple_filter filter = {
420 .flags = RTE_5TUPLE_FLAGS,
421 .dst_ip = rte_bswap32(l2->ip),
422 .dst_ip_mask = UINT32_MAX, /* Enable */
424 .src_ip_mask = 0, /* Disable */
426 .dst_port_mask = 0, /* Disable */
428 .src_port_mask = 0, /* Disable */
429 .proto = IPPROTO_TCP,
430 .proto_mask = UINT8_MAX, /* Enable */
432 .priority = 2, /* Higher priority than IP */
433 .queue = l1->tcp_local_q,
436 return rte_eth_dev_filter_ctrl(l1->pmd_id,
437 RTE_ETH_FILTER_NTUPLE,
438 RTE_ETH_FILTER_DELETE,
/* On port l1, add a 5-tuple filter matching dst IP of l2 AND protocol UDP,
 * steering hits to l1->udp_local_q.  Priority 2 outranks the plain IP rule. */
443 app_link_filter_udp_add(struct app_link_params *l1, struct app_link_params *l2)
445 struct rte_eth_ntuple_filter filter = {
446 .flags = RTE_5TUPLE_FLAGS,
447 .dst_ip = rte_bswap32(l2->ip),
448 .dst_ip_mask = UINT32_MAX, /* Enable */
450 .src_ip_mask = 0, /* Disable */
452 .dst_port_mask = 0, /* Disable */
454 .src_port_mask = 0, /* Disable */
455 .proto = IPPROTO_UDP,
456 .proto_mask = UINT8_MAX, /* Enable */
458 .priority = 2, /* Higher priority than IP */
459 .queue = l1->udp_local_q,
462 return rte_eth_dev_filter_ctrl(l1->pmd_id,
463 RTE_ETH_FILTER_NTUPLE,
/* Delete the UDP 5-tuple filter previously added by
 * app_link_filter_udp_add() (filter spec must match exactly). */
469 app_link_filter_udp_del(struct app_link_params *l1, struct app_link_params *l2)
471 struct rte_eth_ntuple_filter filter = {
472 .flags = RTE_5TUPLE_FLAGS,
473 .dst_ip = rte_bswap32(l2->ip),
474 .dst_ip_mask = UINT32_MAX, /* Enable */
476 .src_ip_mask = 0, /* Disable */
478 .dst_port_mask = 0, /* Disable */
480 .src_port_mask = 0, /* Disable */
481 .proto = IPPROTO_UDP,
482 .proto_mask = UINT8_MAX, /* Enable */
484 .priority = 2, /* Higher priority than IP */
485 .queue = l1->udp_local_q,
488 return rte_eth_dev_filter_ctrl(l1->pmd_id,
489 RTE_ETH_FILTER_NTUPLE,
490 RTE_ETH_FILTER_DELETE,
/* On port l1, add a 5-tuple filter matching dst IP of l2 AND protocol SCTP,
 * steering hits to l1->sctp_local_q.  Priority 2 outranks the plain IP rule. */
495 app_link_filter_sctp_add(struct app_link_params *l1, struct app_link_params *l2)
497 struct rte_eth_ntuple_filter filter = {
498 .flags = RTE_5TUPLE_FLAGS,
499 .dst_ip = rte_bswap32(l2->ip),
500 .dst_ip_mask = UINT32_MAX, /* Enable */
502 .src_ip_mask = 0, /* Disable */
504 .dst_port_mask = 0, /* Disable */
506 .src_port_mask = 0, /* Disable */
507 .proto = IPPROTO_SCTP,
508 .proto_mask = UINT8_MAX, /* Enable */
510 .priority = 2, /* Higher priority than IP */
511 .queue = l1->sctp_local_q,
514 return rte_eth_dev_filter_ctrl(l1->pmd_id,
515 RTE_ETH_FILTER_NTUPLE,
/* Delete the SCTP 5-tuple filter previously added by
 * app_link_filter_sctp_add() (filter spec must match exactly). */
521 app_link_filter_sctp_del(struct app_link_params *l1, struct app_link_params *l2)
523 struct rte_eth_ntuple_filter filter = {
524 .flags = RTE_5TUPLE_FLAGS,
525 .dst_ip = rte_bswap32(l2->ip),
526 .dst_ip_mask = UINT32_MAX, /* Enable */
528 .src_ip_mask = 0, /* Disable */
530 .dst_port_mask = 0, /* Disable */
532 .src_port_mask = 0, /* Disable */
533 .proto = IPPROTO_SCTP,
534 .proto_mask = UINT8_MAX, /* Enable */
536 .priority = 2, /* Higher priority than IP */
537 .queue = l1->sctp_local_q,
540 return rte_eth_dev_filter_ctrl(l1->pmd_id,
541 RTE_ETH_FILTER_NTUPLE,
542 RTE_ETH_FILTER_DELETE,
/* If the link has a dedicated ARP queue configured (arp_q != 0), install
 * the ARP ethertype filter and panic on failure.  arp_q == 0 means ARP
 * stays on the default queue and no filter is needed. */
547 app_link_set_arp_filter(struct app_params *app, struct app_link_params *cp)
549 if (cp->arp_q != 0) {
550 int status = app_link_filter_arp_add(cp);
552 APP_LOG(app, LOW, "%s (%" PRIu32 "): "
553 "Adding ARP filter (queue = %" PRIu32 ")",
554 cp->name, cp->pmd_id, cp->arp_q);
557 rte_panic("%s (%" PRIu32 "): "
558 "Error adding ARP filter "
559 "(queue = %" PRIu32 ") (%" PRId32 ")\n",
560 cp->name, cp->pmd_id, cp->arp_q, status);
/* If the link has a dedicated TCP SYN queue configured (tcp_syn_q != 0),
 * install the SYN filter and panic on failure. */
565 app_link_set_tcp_syn_filter(struct app_params *app, struct app_link_params *cp)
567 if (cp->tcp_syn_q != 0) {
568 int status = app_link_filter_tcp_syn_add(cp);
570 APP_LOG(app, LOW, "%s (%" PRIu32 "): "
571 "Adding TCP SYN filter (queue = %" PRIu32 ")",
572 cp->name, cp->pmd_id, cp->tcp_syn_q);
575 rte_panic("%s (%" PRIu32 "): "
576 "Error adding TCP SYN filter "
577 "(queue = %" PRIu32 ") (%" PRId32 ")\n",
578 cp->name, cp->pmd_id, cp->tcp_syn_q,
/* Bring link cp up: on every link, install the IP/TCP/UDP/SCTP filters
 * that steer traffic destined to cp's IP into the local queues configured
 * for that link (a queue id of 0 disables the corresponding filter), then
 * ask the PMD to raise the link.  Panics on any filter error. */
584 app_link_up_internal(struct app_params *app, struct app_link_params *cp)
589 /* For each link, add filters for IP of current link */
591 for (i = 0; i < app->n_links; i++) {
592 struct app_link_params *p = &app->link_params[i];
595 if (p->ip_local_q != 0) {
596 int status = app_link_filter_ip_add(p, cp);
598 APP_LOG(app, LOW, "%s (%" PRIu32 "): "
599 "Adding IP filter (queue= %" PRIu32
600 ", IP = 0x%08" PRIx32 ")",
601 p->name, p->pmd_id, p->ip_local_q,
605 rte_panic("%s (%" PRIu32 "): "
607 "filter (queue= %" PRIu32 ", "
611 p->ip_local_q, cp->ip, status);
615 if (p->tcp_local_q != 0) {
616 int status = app_link_filter_tcp_add(p, cp);
618 APP_LOG(app, LOW, "%s (%" PRIu32 "): "
621 ", IP = 0x%08" PRIx32 ")",
622 p->name, p->pmd_id, p->tcp_local_q,
626 rte_panic("%s (%" PRIu32 "): "
628 "filter (queue = %" PRIu32 ", "
632 p->tcp_local_q, cp->ip, status);
636 if (p->udp_local_q != 0) {
637 int status = app_link_filter_udp_add(p, cp);
639 APP_LOG(app, LOW, "%s (%" PRIu32 "): "
642 ", IP = 0x%08" PRIx32 ")",
643 p->name, p->pmd_id, p->udp_local_q,
647 rte_panic("%s (%" PRIu32 "): "
649 "filter (queue = %" PRIu32 ", "
653 p->udp_local_q, cp->ip, status);
657 if (p->sctp_local_q != 0) {
658 int status = app_link_filter_sctp_add(p, cp);
660 APP_LOG(app, LOW, "%s (%" PRIu32
661 "): Adding SCTP filter "
663 ", IP = 0x%08" PRIx32 ")",
664 p->name, p->pmd_id, p->sctp_local_q,
668 rte_panic("%s (%" PRIu32 "): "
670 "filter (queue = %" PRIu32 ", "
674 p->sctp_local_q, cp->ip,
681 status = rte_eth_dev_set_link_up(cp->pmd_id);
682 /* Do not panic if PMD does not provide link up functionality */
683 if (status < 0 && status != -ENOTSUP)
684 rte_panic("%s (%" PRIu32 "): PMD set link up error %"
685 PRId32 "\n", cp->name, cp->pmd_id, status);
687 /* Mark link as UP */
/* Bring link cp down: ask the PMD to lower the link, then remove from all
 * links the IP/TCP/UDP/SCTP filters that were installed for cp's IP by
 * app_link_up_internal().  Mirror image of the link-up path. */
692 app_link_down_internal(struct app_params *app, struct app_link_params *cp)
698 status = rte_eth_dev_set_link_down(cp->pmd_id);
699 /* Do not panic if PMD does not provide link down functionality */
700 if (status < 0 && status != -ENOTSUP)
701 rte_panic("%s (%" PRIu32 "): PMD set link down error %"
702 PRId32 "\n", cp->name, cp->pmd_id, status);
704 /* Mark link as DOWN */
707 /* Return if current link IP is not valid */
711 /* For each link, remove filters for IP of current link */
712 for (i = 0; i < app->n_links; i++) {
713 struct app_link_params *p = &app->link_params[i];
716 if (p->ip_local_q != 0) {
717 int status = app_link_filter_ip_del(p, cp);
719 APP_LOG(app, LOW, "%s (%" PRIu32
720 "): Deleting IP filter "
721 "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")",
722 p->name, p->pmd_id, p->ip_local_q, cp->ip);
725 rte_panic("%s (%" PRIu32
726 "): Error deleting IP filter "
730 p->name, p->pmd_id, p->ip_local_q,
735 if (p->tcp_local_q != 0) {
736 int status = app_link_filter_tcp_del(p, cp);
738 APP_LOG(app, LOW, "%s (%" PRIu32
739 "): Deleting TCP filter "
741 ", IP = 0x%" PRIx32 ")",
742 p->name, p->pmd_id, p->tcp_local_q, cp->ip);
745 rte_panic("%s (%" PRIu32
746 "): Error deleting TCP filter "
750 p->name, p->pmd_id, p->tcp_local_q,
755 if (p->udp_local_q != 0) {
756 int status = app_link_filter_udp_del(p, cp);
758 APP_LOG(app, LOW, "%s (%" PRIu32
759 "): Deleting UDP filter "
760 "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")",
761 p->name, p->pmd_id, p->udp_local_q, cp->ip);
764 rte_panic("%s (%" PRIu32
765 "): Error deleting UDP filter "
769 p->name, p->pmd_id, p->udp_local_q,
774 if (p->sctp_local_q != 0) {
775 int status = app_link_filter_sctp_del(p, cp);
777 APP_LOG(app, LOW, "%s (%" PRIu32
778 "): Deleting SCTP filter "
780 ", IP = 0x%" PRIx32 ")",
781 p->name, p->pmd_id, p->sctp_local_q, cp->ip);
784 rte_panic("%s (%" PRIu32
785 "): Error deleting SCTP filter "
789 p->name, p->pmd_id, p->sctp_local_q,
/* Query the link status of every configured port, log speed and state,
 * and panic if any link reports DOWN. */
796 app_check_link(struct app_params *app)
798 uint32_t all_links_up, i;
802 for (i = 0; i < app->n_links; i++) {
803 struct app_link_params *p = &app->link_params[i];
804 struct rte_eth_link link_params;
806 memset(&link_params, 0, sizeof(link_params));
807 rte_eth_link_get(p->pmd_id, &link_params);
/* link_speed is reported in Mbps; divide by 1000 for Gbps. */
809 APP_LOG(app, HIGH, "%s (%" PRIu32 ") (%" PRIu32 " Gbps) %s",
812 link_params.link_speed / 1000,
813 link_params.link_status ? "UP" : "DOWN");
815 if (link_params.link_status == ETH_LINK_DOWN)
819 if (all_links_up == 0)
820 rte_panic("Some links are DOWN\n");
/* Return non-zero when any software queue has IPv4/IPv6 fragmentation or
 * reassembly enabled (used to decide whether multi-segment TX is needed). */
824 is_any_swq_frag_or_ras(struct app_params *app)
828 for (i = 0; i < app->n_pktq_swq; i++) {
829 struct app_pktq_swq_params *p = &app->swq_params[i];
831 if ((p->ipv4_frag == 1) || (p->ipv6_frag == 1) ||
832 (p->ipv4_ras == 1) || (p->ipv6_ras == 1))
/* If any SWQ performs fragmentation or reassembly, enable multi-segment
 * TX offload on every link so multi-mbuf packets can be transmitted. */
840 app_init_link_frag_ras(struct app_params *app)
844 if (is_any_swq_frag_or_ras(app)) {
845 for (i = 0; i < app->n_links; i++) {
846 struct app_link_params *p_link = &app->link_params[i];
847 p_link->conf.txmode.offloads |=
848 DEV_TX_OFFLOAD_MULTI_SEGS;
/* Return the NUMA socket of the given port, falling back to socket 0
 * when the PMD reports SOCKET_ID_ANY. */
854 app_get_cpu_socket_id(uint32_t pmd_id)
856 int status = rte_eth_dev_socket_id(pmd_id);
858 return (status != SOCKET_ID_ANY) ? status : 0;
/* A link uses RSS iff it has at least one RSS queue configured. */
862 app_link_rss_enabled(struct app_link_params *cp)
864 return (cp->n_rss_qs) ? 1 : 0;
/* Program the port's RSS redirection table (RETA): validate the device's
 * RETA size, then fill every RETA slot round-robin with the link's
 * configured RSS queues.  Panics on invalid RETA size or update failure. */
868 app_link_rss_setup(struct app_link_params *cp)
870 struct rte_eth_dev_info dev_info;
871 struct rte_eth_rss_reta_entry64 reta_conf[APP_RETA_SIZE_MAX];
876 memset(&dev_info, 0, sizeof(dev_info));
877 rte_eth_dev_info_get(cp->pmd_id, &dev_info);
879 if (dev_info.reta_size == 0)
880 rte_panic("%s (%u): RSS setup error (null RETA size)\n",
881 cp->name, cp->pmd_id);
/* reta_conf is sized for at most ETH_RSS_RETA_SIZE_512 entries. */
883 if (dev_info.reta_size > ETH_RSS_RETA_SIZE_512)
884 rte_panic("%s (%u): RSS setup error (RETA size too big)\n",
885 cp->name, cp->pmd_id);
887 /* Setup RETA contents */
888 memset(reta_conf, 0, sizeof(reta_conf));
/* Mark every 64-entry RETA group as fully updatable. */
890 for (i = 0; i < dev_info.reta_size; i++)
891 reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
/* Distribute the configured RSS queues over the table round-robin. */
893 for (i = 0; i < dev_info.reta_size; i++) {
894 uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
895 uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
896 uint32_t rss_qs_pos = i % cp->n_rss_qs;
898 reta_conf[reta_id].reta[reta_pos] =
899 (uint16_t) cp->rss_qs[rss_qs_pos];
903 status = rte_eth_dev_rss_reta_update(cp->pmd_id,
907 rte_panic("%s (%u): RSS setup error (RETA update failed)\n",
908 cp->name, cp->pmd_id);
/* Switch the link's RX multi-queue mode to RSS and build its RSS hash
 * function mask from the configured per-protocol settings. */
912 app_init_link_set_config(struct app_link_params *p)
915 p->conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
916 p->conf.rx_adv_conf.rss_conf.rss_hf = p->rss_proto_ipv4 |
/* Full per-link bring-up: configure the device, set MAC/promiscuous state,
 * set up every RX and TX queue owned by the link, start the port, install
 * ARP/TCP-SYN filters and RSS, and finally raise the link.  Panics on any
 * step that fails.  Ends by verifying all links are UP. */
923 app_init_link(struct app_params *app)
927 app_init_link_frag_ras(app);
929 for (i = 0; i < app->n_links; i++) {
930 struct app_link_params *p_link = &app->link_params[i];
931 struct rte_eth_dev_info dev_info;
932 uint32_t link_id, n_hwq_in, n_hwq_out, j;
/* Link id is encoded in the section name, e.g. "LINK3". */
935 sscanf(p_link->name, "LINK%" PRIu32, &link_id);
936 n_hwq_in = app_link_get_n_rxq(app, p_link);
937 n_hwq_out = app_link_get_n_txq(app, p_link);
938 app_init_link_set_config(p_link);
940 APP_LOG(app, HIGH, "Initializing %s (%" PRIu32") "
941 "(%" PRIu32 " RXQ, %" PRIu32 " TXQ) ...",
/* Enable fast mbuf free when the device supports it. */
948 rte_eth_dev_info_get(p_link->pmd_id, &dev_info);
949 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
950 p_link->conf.txmode.offloads |=
951 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
952 status = rte_eth_dev_configure(
958 rte_panic("%s (%" PRId32 "): "
959 "init error (%" PRId32 ")\n",
960 p_link->name, p_link->pmd_id, status);
962 rte_eth_macaddr_get(p_link->pmd_id,
963 (struct ether_addr *) &p_link->mac_addr);
966 rte_eth_promiscuous_enable(p_link->pmd_id);
969 for (j = 0; j < app->n_pktq_hwq_in; j++) {
970 struct app_pktq_hwq_in_params *p_rxq =
971 &app->hwq_in_params[j];
972 uint32_t rxq_link_id, rxq_queue_id;
973 uint16_t nb_rxd = p_rxq->size;
/* Only set up the RX queues belonging to this link ("RXQ<link>.<queue>"). */
975 sscanf(p_rxq->name, "RXQ%" PRIu32 ".%" PRIu32,
976 &rxq_link_id, &rxq_queue_id);
977 if (rxq_link_id != link_id)
/* Clamp the descriptor count to the device's supported range. */
980 status = rte_eth_dev_adjust_nb_rx_tx_desc(
985 rte_panic("%s (%" PRIu32 "): "
986 "%s adjust number of Rx descriptors "
987 "error (%" PRId32 ")\n",
993 p_rxq->conf.offloads = p_link->conf.rxmode.offloads;
994 status = rte_eth_rx_queue_setup(
998 app_get_cpu_socket_id(p_link->pmd_id),
1000 app->mempool[p_rxq->mempool_id]);
1002 rte_panic("%s (%" PRIu32 "): "
1003 "%s init error (%" PRId32 ")\n",
/* Same pattern for the TX queues belonging to this link. */
1011 for (j = 0; j < app->n_pktq_hwq_out; j++) {
1012 struct app_pktq_hwq_out_params *p_txq =
1013 &app->hwq_out_params[j];
1014 uint32_t txq_link_id, txq_queue_id;
1015 uint16_t nb_txd = p_txq->size;
1017 sscanf(p_txq->name, "TXQ%" PRIu32 ".%" PRIu32,
1018 &txq_link_id, &txq_queue_id);
1019 if (txq_link_id != link_id)
1022 status = rte_eth_dev_adjust_nb_rx_tx_desc(
1027 rte_panic("%s (%" PRIu32 "): "
1028 "%s adjust number of Tx descriptors "
1029 "error (%" PRId32 ")\n",
1035 p_txq->conf.offloads = p_link->conf.txmode.offloads;
1036 status = rte_eth_tx_queue_setup(
1040 app_get_cpu_socket_id(p_link->pmd_id),
1043 rte_panic("%s (%" PRIu32 "): "
1044 "%s init error (%" PRId32 ")\n",
1052 status = rte_eth_dev_start(p_link->pmd_id);
1054 rte_panic("Cannot start %s (error %" PRId32 ")\n",
1055 p_link->name, status);
1058 app_link_set_arp_filter(app, p_link);
1059 app_link_set_tcp_syn_filter(app, p_link);
1060 if (app_link_rss_enabled(p_link))
1061 app_link_rss_setup(p_link);
1064 app_link_up_internal(app, p_link);
1067 app_check_link(app);
/* Create one rte_ring per software queue.  Single-reader/single-writer
 * queues get the SC_DEQ/SP_ENQ flags for the faster lockless paths. */
1071 app_init_swq(struct app_params *app)
1075 for (i = 0; i < app->n_pktq_swq; i++) {
1076 struct app_pktq_swq_params *p = &app->swq_params[i];
1079 if (app_swq_get_readers(app, p) == 1)
1080 flags |= RING_F_SC_DEQ;
1081 if (app_swq_get_writers(app, p) == 1)
1082 flags |= RING_F_SP_ENQ;
1084 APP_LOG(app, HIGH, "Initializing %s...", p->name);
1085 app->swq[i] = rte_ring_create(
1091 if (app->swq[i] == NULL)
1092 rte_panic("%s init error\n", p->name);
/* Create one rte_sched traffic-manager port per TM section: derive the
 * port rate from the underlying link speed, then configure every subport
 * and every pipe that has a profile assigned.  Panics on any error. */
1097 app_init_tm(struct app_params *app)
1101 for (i = 0; i < app->n_pktq_tm; i++) {
1102 struct app_pktq_tm_params *p_tm = &app->tm_params[i];
1103 struct app_link_params *p_link;
1104 struct rte_eth_link link_eth_params;
1105 struct rte_sched_port *sched;
1106 uint32_t n_subports, subport_id;
1109 p_link = app_get_link_for_tm(app, p_tm);
1111 rte_eth_link_get(p_link->pmd_id, &link_eth_params);
1114 p_tm->sched_port_params.name = p_tm->name;
1115 p_tm->sched_port_params.socket =
1116 app_get_cpu_socket_id(p_link->pmd_id);
/* link_speed is in Mbps; convert to bytes per second for the scheduler. */
1117 p_tm->sched_port_params.rate =
1118 (uint64_t) link_eth_params.link_speed * 1000 * 1000 / 8;
1120 APP_LOG(app, HIGH, "Initializing %s ...", p_tm->name);
1121 sched = rte_sched_port_config(&p_tm->sched_port_params);
1123 rte_panic("%s init error\n", p_tm->name);
1127 n_subports = p_tm->sched_port_params.n_subports_per_port;
1128 for (subport_id = 0; subport_id < n_subports; subport_id++) {
1129 uint32_t n_pipes_per_subport, pipe_id;
1131 status = rte_sched_subport_config(sched,
1133 &p_tm->sched_subport_params[subport_id]);
1135 rte_panic("%s subport %" PRIu32
1136 " init error (%" PRId32 ")\n",
1137 p_tm->name, subport_id, status);
1140 n_pipes_per_subport =
1141 p_tm->sched_port_params.n_pipes_per_subport;
1143 pipe_id < n_pipes_per_subport;
/* Pipe-to-profile table is flattened: [subport][pipe]. */
1145 int profile_id = p_tm->sched_pipe_to_profile[
1146 subport_id * APP_MAX_SCHED_PIPES +
/* -1 means the pipe is not provisioned; skip it. */
1149 if (profile_id == -1)
1152 status = rte_sched_pipe_config(sched,
1157 rte_panic("%s subport %" PRIu32
1159 " (profile %" PRId32 ") "
1160 "init error (% " PRId32 ")\n",
1161 p_tm->name, subport_id, pipe_id,
1162 profile_id, status);
/* TAP support: on non-Linux targets TAP is unavailable, so the stub
 * panics if any TAP queue was configured; on Linux each TAP section opens
 * /dev/net/tun (non-blocking) and creates the named TAP interface. */
1168 #ifndef RTE_EXEC_ENV_LINUXAPP
1170 app_init_tap(struct app_params *app) {
1171 if (app->n_pktq_tap == 0)
1174 rte_panic("TAP device not supported.\n");
1178 app_init_tap(struct app_params *app)
1182 for (i = 0; i < app->n_pktq_tap; i++) {
1183 struct app_pktq_tap_params *p_tap = &app->tap_params[i];
1187 APP_LOG(app, HIGH, "Initializing %s ...", p_tap->name);
1189 fd = open("/dev/net/tun", O_RDWR | O_NONBLOCK);
1191 rte_panic("Cannot open file /dev/net/tun\n");
1193 memset(&ifr, 0, sizeof(ifr));
1194 ifr.ifr_flags = IFF_TAP | IFF_NO_PI; /* No packet information */
1195 snprintf(ifr.ifr_name, IFNAMSIZ, "%s", p_tap->name);
1197 status = ioctl(fd, TUNSETIFF, (void *) &ifr);
1199 rte_panic("TAP setup error\n");
/* KNI kernel-request callbacks: invoked when the kernel side of a KNI
 * interface asks to change link state or MTU. */
1206 #ifdef RTE_LIBRTE_KNI
/* Bring the backing ethdev port up or down on behalf of the kernel. */
1208 kni_config_network_interface(uint16_t port_id, uint8_t if_up) {
1211 if (port_id >= rte_eth_dev_count())
1215 rte_eth_dev_set_link_up(port_id) :
1216 rte_eth_dev_set_link_down(port_id);
/* Apply a kernel-requested MTU change, rejecting out-of-range values. */
1222 kni_change_mtu(uint16_t port_id, unsigned int new_mtu) {
1225 if (port_id >= rte_eth_dev_count())
1228 if (new_mtu > ETHER_MAX_LEN)
1232 ret = rte_eth_dev_set_mtu(port_id, new_mtu);
1238 #endif /* RTE_LIBRTE_KNI */
/* KNI support: without librte_kni the stub panics if KNI queues were
 * configured; otherwise allocate one KNI device per KNI section, wired to
 * its backing link's device info, mempool, and the callbacks above. */
1240 #ifndef RTE_LIBRTE_KNI
1242 app_init_kni(struct app_params *app) {
1243 if (app->n_pktq_kni == 0)
1246 rte_panic("Can not init KNI without librte_kni support.\n");
1250 app_init_kni(struct app_params *app) {
1253 if (app->n_pktq_kni == 0)
1256 rte_kni_init(app->n_pktq_kni);
1258 for (i = 0; i < app->n_pktq_kni; i++) {
1259 struct app_pktq_kni_params *p_kni = &app->kni_params[i];
1260 struct app_link_params *p_link;
1261 struct rte_eth_dev_info dev_info;
1262 struct app_mempool_params *mempool_params;
1263 struct rte_mempool *mempool;
1264 struct rte_kni_conf conf;
1265 struct rte_kni_ops ops;
1268 p_link = app_get_link_for_kni(app, p_kni);
1269 memset(&dev_info, 0, sizeof(dev_info));
1270 rte_eth_dev_info_get(p_link->pmd_id, &dev_info);
1273 mempool_params = &app->mempool_params[p_kni->mempool_id];
1274 mempool = app->mempool[p_kni->mempool_id];
1277 memset(&conf, 0, sizeof(conf));
1278 snprintf(conf.name, RTE_KNI_NAMESIZE, "%s", p_kni->name);
1279 conf.force_bind = p_kni->force_bind;
/* When force_bind is set, pin the KNI kernel thread to a specific lcore. */
1280 if (conf.force_bind) {
1283 lcore_id = cpu_core_map_get_lcore_id(app->core_map,
1286 p_kni->hyper_th_id);
1289 rte_panic("%s invalid CPU core\n", p_kni->name);
1291 conf.core_id = (uint32_t) lcore_id;
1293 conf.group_id = p_link->pmd_id;
1294 conf.mbuf_size = mempool_params->buffer_size;
1295 conf.addr = dev_info.pci_dev->addr;
1296 conf.id = dev_info.pci_dev->id;
1298 memset(&ops, 0, sizeof(ops));
1299 ops.port_id = (uint8_t) p_link->pmd_id;
1300 ops.change_mtu = kni_change_mtu;
1301 ops.config_network_if = kni_config_network_interface;
1303 APP_LOG(app, HIGH, "Initializing %s ...", p_kni->name);
1304 app->kni[i] = rte_kni_alloc(mempool, &conf, &ops);
1306 rte_panic("%s init error\n", p_kni->name);
1309 #endif /* RTE_LIBRTE_KNI */
/* Create one single-producer/single-consumer rte_ring per message queue
 * (used for control messages between front-end and pipeline threads). */
1312 app_init_msgq(struct app_params *app)
1316 for (i = 0; i < app->n_msgq; i++) {
1317 struct app_msgq_params *p = &app->msgq_params[i];
1319 APP_LOG(app, HIGH, "Initializing %s ...", p->name);
1320 app->msgq[i] = rte_ring_create(
1324 RING_F_SP_ENQ | RING_F_SC_DEQ);
1326 if (app->msgq[i] == NULL)
1327 rte_panic("%s init error\n", p->name);
1331 void app_pipeline_params_get(struct app_params *app,
1332 struct app_pipeline_params *p_in,
1333 struct pipeline_params *p_out)
1337 snprintf(p_out->name, PIPELINE_NAME_SIZE, "%s", p_in->name);
1339 snprintf(p_out->type, PIPELINE_TYPE_SIZE, "%s", p_in->type);
1341 p_out->socket_id = (int) p_in->socket_id;
1343 p_out->log_level = app->log_level;
1346 p_out->n_ports_in = p_in->n_pktq_in;
1347 for (i = 0; i < p_in->n_pktq_in; i++) {
1348 struct app_pktq_in_params *in = &p_in->pktq_in[i];
1349 struct pipeline_port_in_params *out = &p_out->port_in[i];
1352 case APP_PKTQ_IN_HWQ:
1354 struct app_pktq_hwq_in_params *p_hwq_in =
1355 &app->hwq_in_params[in->id];
1356 struct app_link_params *p_link =
1357 app_get_link_for_rxq(app, p_hwq_in);
1358 uint32_t rxq_link_id, rxq_queue_id;
1360 sscanf(p_hwq_in->name, "RXQ%" SCNu32 ".%" SCNu32,
1364 out->type = PIPELINE_PORT_IN_ETHDEV_READER;
1365 out->params.ethdev.port_id = p_link->pmd_id;
1366 out->params.ethdev.queue_id = rxq_queue_id;
1367 out->burst_size = p_hwq_in->burst;
1370 case APP_PKTQ_IN_SWQ:
1372 struct app_pktq_swq_params *swq_params = &app->swq_params[in->id];
1374 if ((swq_params->ipv4_frag == 0) && (swq_params->ipv6_frag == 0)) {
1375 if (app_swq_get_readers(app, swq_params) == 1) {
1376 out->type = PIPELINE_PORT_IN_RING_READER;
1377 out->params.ring.ring = app->swq[in->id];
1378 out->burst_size = app->swq_params[in->id].burst_read;
1380 out->type = PIPELINE_PORT_IN_RING_MULTI_READER;
1381 out->params.ring_multi.ring = app->swq[in->id];
1382 out->burst_size = swq_params->burst_read;
1385 if (swq_params->ipv4_frag == 1) {
1386 struct rte_port_ring_reader_ipv4_frag_params *params =
1387 &out->params.ring_ipv4_frag;
1389 out->type = PIPELINE_PORT_IN_RING_READER_IPV4_FRAG;
1390 params->ring = app->swq[in->id];
1391 params->mtu = swq_params->mtu;
1392 params->metadata_size = swq_params->metadata_size;
1393 params->pool_direct =
1394 app->mempool[swq_params->mempool_direct_id];
1395 params->pool_indirect =
1396 app->mempool[swq_params->mempool_indirect_id];
1397 out->burst_size = swq_params->burst_read;
1399 struct rte_port_ring_reader_ipv6_frag_params *params =
1400 &out->params.ring_ipv6_frag;
1402 out->type = PIPELINE_PORT_IN_RING_READER_IPV6_FRAG;
1403 params->ring = app->swq[in->id];
1404 params->mtu = swq_params->mtu;
1405 params->metadata_size = swq_params->metadata_size;
1406 params->pool_direct =
1407 app->mempool[swq_params->mempool_direct_id];
1408 params->pool_indirect =
1409 app->mempool[swq_params->mempool_indirect_id];
1410 out->burst_size = swq_params->burst_read;
1415 case APP_PKTQ_IN_TM:
1417 out->type = PIPELINE_PORT_IN_SCHED_READER;
1418 out->params.sched.sched = app->tm[in->id];
1419 out->burst_size = app->tm_params[in->id].burst_read;
1422 #ifdef RTE_EXEC_ENV_LINUXAPP
1423 case APP_PKTQ_IN_TAP:
1425 struct app_pktq_tap_params *tap_params =
1426 &app->tap_params[in->id];
1427 struct app_mempool_params *mempool_params =
1428 &app->mempool_params[tap_params->mempool_id];
1429 struct rte_mempool *mempool =
1430 app->mempool[tap_params->mempool_id];
1432 out->type = PIPELINE_PORT_IN_FD_READER;
1433 out->params.fd.fd = app->tap[in->id];
1434 out->params.fd.mtu = mempool_params->buffer_size;
1435 out->params.fd.mempool = mempool;
1436 out->burst_size = app->tap_params[in->id].burst_read;
1440 #ifdef RTE_LIBRTE_KNI
1441 case APP_PKTQ_IN_KNI:
1443 out->type = PIPELINE_PORT_IN_KNI_READER;
1444 out->params.kni.kni = app->kni[in->id];
1445 out->burst_size = app->kni_params[in->id].burst_read;
1448 #endif /* RTE_LIBRTE_KNI */
1449 case APP_PKTQ_IN_SOURCE:
1451 uint32_t mempool_id =
1452 app->source_params[in->id].mempool_id;
1454 out->type = PIPELINE_PORT_IN_SOURCE;
1455 out->params.source.mempool = app->mempool[mempool_id];
1456 out->burst_size = app->source_params[in->id].burst;
1457 out->params.source.file_name =
1458 app->source_params[in->id].file_name;
1459 out->params.source.n_bytes_per_pkt =
1460 app->source_params[in->id].n_bytes_per_pkt;
1469 p_out->n_ports_out = p_in->n_pktq_out;
1470 for (i = 0; i < p_in->n_pktq_out; i++) {
1471 struct app_pktq_out_params *in = &p_in->pktq_out[i];
1472 struct pipeline_port_out_params *out = &p_out->port_out[i];
1475 case APP_PKTQ_OUT_HWQ:
1477 struct app_pktq_hwq_out_params *p_hwq_out =
1478 &app->hwq_out_params[in->id];
1479 struct app_link_params *p_link =
1480 app_get_link_for_txq(app, p_hwq_out);
1481 uint32_t txq_link_id, txq_queue_id;
1483 sscanf(p_hwq_out->name,
1484 "TXQ%" SCNu32 ".%" SCNu32,
1488 if (p_hwq_out->dropless == 0) {
1489 struct rte_port_ethdev_writer_params *params =
1490 &out->params.ethdev;
1492 out->type = PIPELINE_PORT_OUT_ETHDEV_WRITER;
1493 params->port_id = p_link->pmd_id;
1494 params->queue_id = txq_queue_id;
1495 params->tx_burst_sz =
1496 app->hwq_out_params[in->id].burst;
1498 struct rte_port_ethdev_writer_nodrop_params
1499 *params = &out->params.ethdev_nodrop;
1502 PIPELINE_PORT_OUT_ETHDEV_WRITER_NODROP;
1503 params->port_id = p_link->pmd_id;
1504 params->queue_id = txq_queue_id;
1505 params->tx_burst_sz = p_hwq_out->burst;
1506 params->n_retries = p_hwq_out->n_retries;
1510 case APP_PKTQ_OUT_SWQ:
1512 struct app_pktq_swq_params *swq_params = &app->swq_params[in->id];
1514 if ((swq_params->ipv4_ras == 0) && (swq_params->ipv6_ras == 0)) {
1515 if (app_swq_get_writers(app, swq_params) == 1) {
1516 if (app->swq_params[in->id].dropless == 0) {
1517 struct rte_port_ring_writer_params *params =
1520 out->type = PIPELINE_PORT_OUT_RING_WRITER;
1521 params->ring = app->swq[in->id];
1522 params->tx_burst_sz =
1523 app->swq_params[in->id].burst_write;
1525 struct rte_port_ring_writer_nodrop_params
1526 *params = &out->params.ring_nodrop;
1529 PIPELINE_PORT_OUT_RING_WRITER_NODROP;
1530 params->ring = app->swq[in->id];
1531 params->tx_burst_sz =
1532 app->swq_params[in->id].burst_write;
1534 app->swq_params[in->id].n_retries;
1537 if (swq_params->dropless == 0) {
1538 struct rte_port_ring_multi_writer_params *params =
1539 &out->params.ring_multi;
1541 out->type = PIPELINE_PORT_OUT_RING_MULTI_WRITER;
1542 params->ring = app->swq[in->id];
1543 params->tx_burst_sz = swq_params->burst_write;
1545 struct rte_port_ring_multi_writer_nodrop_params
1546 *params = &out->params.ring_multi_nodrop;
1548 out->type = PIPELINE_PORT_OUT_RING_MULTI_WRITER_NODROP;
1549 params->ring = app->swq[in->id];
1550 params->tx_burst_sz = swq_params->burst_write;
1551 params->n_retries = swq_params->n_retries;
1555 if (swq_params->ipv4_ras == 1) {
1556 struct rte_port_ring_writer_ipv4_ras_params *params =
1557 &out->params.ring_ipv4_ras;
1559 out->type = PIPELINE_PORT_OUT_RING_WRITER_IPV4_RAS;
1560 params->ring = app->swq[in->id];
1561 params->tx_burst_sz = swq_params->burst_write;
1563 struct rte_port_ring_writer_ipv6_ras_params *params =
1564 &out->params.ring_ipv6_ras;
1566 out->type = PIPELINE_PORT_OUT_RING_WRITER_IPV6_RAS;
1567 params->ring = app->swq[in->id];
1568 params->tx_burst_sz = swq_params->burst_write;
1573 case APP_PKTQ_OUT_TM:
1575 struct rte_port_sched_writer_params *params =
1578 out->type = PIPELINE_PORT_OUT_SCHED_WRITER;
1579 params->sched = app->tm[in->id];
1580 params->tx_burst_sz =
1581 app->tm_params[in->id].burst_write;
1584 #ifdef RTE_EXEC_ENV_LINUXAPP
1585 case APP_PKTQ_OUT_TAP:
1587 struct rte_port_fd_writer_params *params =
1590 out->type = PIPELINE_PORT_OUT_FD_WRITER;
1591 params->fd = app->tap[in->id];
1592 params->tx_burst_sz =
1593 app->tap_params[in->id].burst_write;
1597 #ifdef RTE_LIBRTE_KNI
1598 case APP_PKTQ_OUT_KNI:
1600 struct app_pktq_kni_params *p_kni =
1601 &app->kni_params[in->id];
1603 if (p_kni->dropless == 0) {
1604 struct rte_port_kni_writer_params *params =
1607 out->type = PIPELINE_PORT_OUT_KNI_WRITER;
1608 params->kni = app->kni[in->id];
1609 params->tx_burst_sz =
1610 app->kni_params[in->id].burst_write;
1612 struct rte_port_kni_writer_nodrop_params
1613 *params = &out->params.kni_nodrop;
1615 out->type = PIPELINE_PORT_OUT_KNI_WRITER_NODROP;
1616 params->kni = app->kni[in->id];
1617 params->tx_burst_sz =
1618 app->kni_params[in->id].burst_write;
1620 app->kni_params[in->id].n_retries;
1624 #endif /* RTE_LIBRTE_KNI */
1625 case APP_PKTQ_OUT_SINK:
1627 out->type = PIPELINE_PORT_OUT_SINK;
1628 out->params.sink.file_name =
1629 app->sink_params[in->id].file_name;
1630 out->params.sink.max_n_pkts =
1631 app->sink_params[in->id].
1642 p_out->n_msgq = p_in->n_msgq_in;
1644 for (i = 0; i < p_in->n_msgq_in; i++)
1645 p_out->msgq_in[i] = app->msgq[p_in->msgq_in[i]];
1647 for (i = 0; i < p_in->n_msgq_out; i++)
1648 p_out->msgq_out[i] = app->msgq[p_in->msgq_out[i]];
1651 p_out->n_args = p_in->n_args;
1652 for (i = 0; i < p_in->n_args; i++) {
1653 p_out->args_name[i] = p_in->args_name[i];
1654 p_out->args_value[i] = p_in->args_value[i];
1659 app_init_pipelines(struct app_params *app)
1663 for (p_id = 0; p_id < app->n_pipelines; p_id++) {
1664 struct app_pipeline_params *params =
1665 &app->pipeline_params[p_id];
1666 struct app_pipeline_data *data = &app->pipeline_data[p_id];
1667 struct pipeline_type *ptype;
1668 struct pipeline_params pp;
1670 APP_LOG(app, HIGH, "Initializing %s ...", params->name);
1672 ptype = app_pipeline_type_find(app, params->type);
1674 rte_panic("Init error: Unknown pipeline type \"%s\"\n",
1677 app_pipeline_params_get(app, params, &pp);
1681 if (ptype->be_ops->f_init) {
1682 data->be = ptype->be_ops->f_init(&pp, (void *) app);
1684 if (data->be == NULL)
1685 rte_panic("Pipeline instance \"%s\" back-end "
1686 "init error\n", params->name);
1691 if (ptype->fe_ops->f_init) {
1692 data->fe = ptype->fe_ops->f_init(&pp, (void *) app);
1694 if (data->fe == NULL)
1695 rte_panic("Pipeline instance \"%s\" front-end "
1696 "init error\n", params->name);
1699 data->ptype = ptype;
1701 data->timer_period = (rte_get_tsc_hz() *
1702 params->timer_period) / 1000;
1707 app_post_init_pipelines(struct app_params *app)
1711 for (p_id = 0; p_id < app->n_pipelines; p_id++) {
1712 struct app_pipeline_params *params =
1713 &app->pipeline_params[p_id];
1714 struct app_pipeline_data *data = &app->pipeline_data[p_id];
1717 if (data->ptype->fe_ops->f_post_init == NULL)
1720 status = data->ptype->fe_ops->f_post_init(data->fe);
1722 rte_panic("Pipeline instance \"%s\" front-end "
1723 "post-init error\n", params->name);
1728 app_init_threads(struct app_params *app)
1730 uint64_t time = rte_get_tsc_cycles();
1733 for (p_id = 0; p_id < app->n_pipelines; p_id++) {
1734 struct app_pipeline_params *params =
1735 &app->pipeline_params[p_id];
1736 struct app_pipeline_data *data = &app->pipeline_data[p_id];
1737 struct pipeline_type *ptype;
1738 struct app_thread_data *t;
1739 struct app_thread_pipeline_data *p;
1742 lcore_id = cpu_core_map_get_lcore_id(app->core_map,
1745 params->hyper_th_id);
1748 rte_panic("Invalid core s%" PRIu32 "c%" PRIu32 "%s\n",
1751 (params->hyper_th_id) ? "h" : "");
1753 t = &app->thread_data[lcore_id];
1755 t->timer_period = (rte_get_tsc_hz() * APP_THREAD_TIMER_PERIOD) / 1000;
1756 t->thread_req_deadline = time + t->timer_period;
1758 t->headroom_cycles = 0;
1759 t->headroom_time = rte_get_tsc_cycles();
1760 t->headroom_ratio = 0.0;
1762 t->msgq_in = app_thread_msgq_in_get(app,
1765 params->hyper_th_id);
1766 if (t->msgq_in == NULL)
1767 rte_panic("Init error: Cannot find MSGQ_IN for thread %" PRId32,
1770 t->msgq_out = app_thread_msgq_out_get(app,
1773 params->hyper_th_id);
1774 if (t->msgq_out == NULL)
1775 rte_panic("Init error: Cannot find MSGQ_OUT for thread %" PRId32,
1778 ptype = app_pipeline_type_find(app, params->type);
1780 rte_panic("Init error: Unknown pipeline "
1781 "type \"%s\"\n", params->type);
1783 p = (ptype->be_ops->f_run == NULL) ?
1784 &t->regular[t->n_regular] :
1785 &t->custom[t->n_custom];
1787 p->pipeline_id = p_id;
1789 p->f_run = ptype->be_ops->f_run;
1790 p->f_timer = ptype->be_ops->f_timer;
1791 p->timer_period = data->timer_period;
1792 p->deadline = time + data->timer_period;
1796 if (ptype->be_ops->f_run == NULL)
1803 int app_init(struct app_params *app)
1805 app_init_core_map(app);
1806 app_init_core_mask(app);
1809 app_init_mempool(app);
1817 app_pipeline_common_cmd_push(app);
1818 app_pipeline_thread_cmd_push(app);
1819 app_pipeline_type_register(app, &pipeline_master);
1821 app_init_pipelines(app);
1822 app_init_threads(app);
/*
 * Second init stage, run after app_init(): gives pipeline front-ends
 * a chance to perform cross-pipeline setup. Always returns 0;
 * failures panic inside app_post_init_pipelines().
 */
int app_post_init(struct app_params *app)
{
	app_post_init_pipelines(app);

	return 0;
}
1835 app_pipeline_type_cmd_push(struct app_params *app,
1836 struct pipeline_type *ptype)
1838 cmdline_parse_ctx_t *cmds;
1841 /* Check input arguments */
1842 if ((app == NULL) ||
1846 n_cmds = pipeline_type_cmds_count(ptype);
1850 cmds = ptype->fe_ops->cmds;
1852 /* Check for available slots in the application commands array */
1853 if (n_cmds > APP_MAX_CMDS - app->n_cmds)
1856 /* Push pipeline commands into the application */
1857 memcpy(&app->cmds[app->n_cmds],
1859 n_cmds * sizeof(cmdline_parse_ctx_t));
1861 for (i = 0; i < n_cmds; i++)
1862 app->cmds[app->n_cmds + i]->data = app;
1864 app->n_cmds += n_cmds;
1865 app->cmds[app->n_cmds] = NULL;
1871 app_pipeline_type_register(struct app_params *app, struct pipeline_type *ptype)
1875 /* Check input arguments */
1876 if ((app == NULL) ||
1878 (ptype->name == NULL) ||
1879 (strlen(ptype->name) == 0) ||
1880 (ptype->be_ops->f_init == NULL) ||
1881 (ptype->be_ops->f_timer == NULL))
1884 /* Check for duplicate entry */
1885 for (i = 0; i < app->n_pipeline_types; i++)
1886 if (strcmp(app->pipeline_type[i].name, ptype->name) == 0)
1889 /* Check for resource availability */
1890 n_cmds = pipeline_type_cmds_count(ptype);
1891 if ((app->n_pipeline_types == APP_MAX_PIPELINE_TYPES) ||
1892 (n_cmds > APP_MAX_CMDS - app->n_cmds))
1895 /* Copy pipeline type */
1896 memcpy(&app->pipeline_type[app->n_pipeline_types++],
1898 sizeof(struct pipeline_type));
1900 /* Copy CLI commands */
1902 app_pipeline_type_cmd_push(app, ptype);
1908 pipeline_type *app_pipeline_type_find(struct app_params *app, char *name)
1912 for (i = 0; i < app->n_pipeline_types; i++)
1913 if (strcmp(app->pipeline_type[i].name, name) == 0)
1914 return &app->pipeline_type[i];