1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
8 #include <netinet/in.h>
9 #ifdef RTE_EXEC_ENV_LINUXAPP
11 #include <linux/if_tun.h>
14 #include <sys/ioctl.h>
17 #include <rte_cycles.h>
18 #include <rte_ethdev.h>
19 #include <rte_ether.h>
22 #include <rte_malloc.h>
23 #include <rte_bus_pci.h>
27 #include "pipeline_common_fe.h"
28 #include "pipeline_master.h"
29 #include "pipeline_firewall.h"
30 #include "thread_fe.h"
32 #define APP_NAME_SIZE 32
34 #define APP_RETA_SIZE_MAX (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
/* Build the application's CPU core map (NUMA socket / physical core /
 * hyper-thread -> lcore id); panics if the map cannot be created.
 * NOTE(review): this listing is elided — lines are missing between the
 * numbered statements throughout this file. */
37 app_init_core_map(struct app_params *app)
39 APP_LOG(app, HIGH, "Initializing CPU core map ...");
40 app->core_map = cpu_core_map_init(RTE_MAX_NUMA_NODES, RTE_MAX_LCORE,
43 if (app->core_map == NULL)
44 rte_panic("Cannot create CPU core map\n");
/* Dump the full map only at the most verbose log level. */
46 if (app->log_level >= APP_LOG_LEVEL_LOW)
47 cpu_core_map_print(app->core_map);
50 /* Core Mask String in Hex Representation */
51 #define APP_CORE_MASK_STRING_SIZE ((64 * APP_CORE_MASK_SIZE) / 8 * 2 + 1)
/* Resolve each pipeline's (socket, core, ht) placement to an lcore id,
 * enable that lcore in the application core mask, and log the resulting
 * mask as a hex string. Panics if any pipeline maps to an invalid lcore. */
54 app_init_core_mask(struct app_params *app)
57 char core_mask_str[APP_CORE_MASK_STRING_SIZE];
59 for (i = 0; i < app->n_pipelines; i++) {
60 struct app_pipeline_params *p = &app->pipeline_params[i];
63 lcore_id = cpu_core_map_get_lcore_id(app->core_map,
69 rte_panic("Cannot create CPU core mask\n");
71 app_core_enable_in_core_mask(app, lcore_id);
74 app_core_build_core_mask_string(app, core_mask_str);
75 APP_LOG(app, HIGH, "CPU core mask = 0x%s", core_mask_str);
/* Translate the parsed application EAL configuration (app->eal_params) into
 * an argv vector and invoke rte_eal_init(). Each present option is formatted
 * into a temporary buffer and strdup()'d into app->eal_argv. Panics on EAL
 * init failure.
 * NOTE(review): argv entries are strdup()'d and appear never freed; this is
 * intentional for a process-lifetime vector — confirm against full source. */
79 app_init_eal(struct app_params *app)
82 char core_mask_str[APP_CORE_MASK_STRING_SIZE];
83 struct app_eal_params *p = &app->eal_params;
/* argv[0] is the application name, as EAL expects. */
88 app->eal_argv[n_args++] = strdup(app->app_name);
/* -c <coremask>: built from the pipeline placement computed earlier. */
90 app_core_build_core_mask_string(app, core_mask_str);
91 snprintf(buffer, sizeof(buffer), "-c%s", core_mask_str);
92 app->eal_argv[n_args++] = strdup(buffer);
95 snprintf(buffer, sizeof(buffer), "--lcores=%s", p->coremap);
96 app->eal_argv[n_args++] = strdup(buffer);
99 if (p->master_lcore_present) {
102 "--master-lcore=%" PRIu32,
104 app->eal_argv[n_args++] = strdup(buffer);
/* -n: number of memory channels. */
107 snprintf(buffer, sizeof(buffer), "-n%" PRIu32, p->channels);
108 app->eal_argv[n_args++] = strdup(buffer);
110 if (p->memory_present) {
111 snprintf(buffer, sizeof(buffer), "-m%" PRIu32, p->memory);
112 app->eal_argv[n_args++] = strdup(buffer);
115 if (p->ranks_present) {
116 snprintf(buffer, sizeof(buffer), "-r%" PRIu32, p->ranks);
117 app->eal_argv[n_args++] = strdup(buffer);
/* One --pci-blacklist option per configured entry. */
120 for (i = 0; i < APP_MAX_LINKS; i++) {
121 if (p->pci_blacklist[i] == NULL)
126 "--pci-blacklist=%s",
127 p->pci_blacklist[i]);
128 app->eal_argv[n_args++] = strdup(buffer);
/* Whitelist handling differs depending on whether a port mask is set. */
131 if (app->port_mask != 0)
132 for (i = 0; i < APP_MAX_LINKS; i++) {
133 if (p->pci_whitelist[i] == NULL)
138 "--pci-whitelist=%s",
139 p->pci_whitelist[i]);
140 app->eal_argv[n_args++] = strdup(buffer);
/* Otherwise whitelist the PCI BDF of every configured link. */
143 for (i = 0; i < app->n_links; i++) {
144 char *pci_bdf = app->link_params[i].pci_bdf;
148 "--pci-whitelist=%s",
150 app->eal_argv[n_args++] = strdup(buffer);
153 for (i = 0; i < APP_MAX_LINKS; i++) {
154 if (p->vdev[i] == NULL)
161 app->eal_argv[n_args++] = strdup(buffer);
/* Boolean flags: emitted only when both present and enabled. */
164 if ((p->vmware_tsc_map_present) && p->vmware_tsc_map) {
165 snprintf(buffer, sizeof(buffer), "--vmware-tsc-map");
166 app->eal_argv[n_args++] = strdup(buffer);
174 app->eal_argv[n_args++] = strdup(buffer);
178 snprintf(buffer, sizeof(buffer), "--syslog=%s", p->syslog);
179 app->eal_argv[n_args++] = strdup(buffer);
182 if (p->log_level_present) {
185 "--log-level=%" PRIu32,
187 app->eal_argv[n_args++] = strdup(buffer);
190 if ((p->version_present) && p->version) {
191 snprintf(buffer, sizeof(buffer), "-v");
192 app->eal_argv[n_args++] = strdup(buffer);
195 if ((p->help_present) && p->help) {
196 snprintf(buffer, sizeof(buffer), "--help");
197 app->eal_argv[n_args++] = strdup(buffer);
200 if ((p->no_huge_present) && p->no_huge) {
201 snprintf(buffer, sizeof(buffer), "--no-huge");
202 app->eal_argv[n_args++] = strdup(buffer);
205 if ((p->no_pci_present) && p->no_pci) {
206 snprintf(buffer, sizeof(buffer), "--no-pci");
207 app->eal_argv[n_args++] = strdup(buffer);
210 if ((p->no_hpet_present) && p->no_hpet) {
211 snprintf(buffer, sizeof(buffer), "--no-hpet");
212 app->eal_argv[n_args++] = strdup(buffer);
215 if ((p->no_shconf_present) && p->no_shconf) {
216 snprintf(buffer, sizeof(buffer), "--no-shconf");
217 app->eal_argv[n_args++] = strdup(buffer);
221 snprintf(buffer, sizeof(buffer), "-d%s", p->add_driver);
222 app->eal_argv[n_args++] = strdup(buffer);
230 app->eal_argv[n_args++] = strdup(buffer);
234 snprintf(buffer, sizeof(buffer), "--huge-dir=%s", p->huge_dir);
235 app->eal_argv[n_args++] = strdup(buffer);
238 if (p->file_prefix) {
243 app->eal_argv[n_args++] = strdup(buffer);
246 if (p->base_virtaddr) {
249 "--base-virtaddr=%s",
251 app->eal_argv[n_args++] = strdup(buffer);
254 if ((p->create_uio_dev_present) && p->create_uio_dev) {
255 snprintf(buffer, sizeof(buffer), "--create-uio-dev");
256 app->eal_argv[n_args++] = strdup(buffer);
264 app->eal_argv[n_args++] = strdup(buffer);
/* Terminating "--" separates EAL options from application options. */
267 snprintf(buffer, sizeof(buffer), "--");
268 app->eal_argv[n_args++] = strdup(buffer);
270 app->eal_argc = n_args;
272 APP_LOG(app, HIGH, "Initializing EAL ...");
/* Echo the assembled argv (skipping argv[0]) at verbose log levels. */
273 if (app->log_level >= APP_LOG_LEVEL_LOW) {
276 fprintf(stdout, "[APP] EAL arguments: \"");
277 for (i = 1; i < app->eal_argc; i++)
278 fprintf(stdout, "%s ", app->eal_argv[i]);
279 fprintf(stdout, "\"\n");
282 status = rte_eal_init(app->eal_argc, app->eal_argv);
284 rte_panic("EAL init error\n");
/* Create one rte_pktmbuf pool per configured mempool; panics if any
 * pool cannot be created. */
288 app_init_mempool(struct app_params *app)
292 for (i = 0; i < app->n_mempools; i++) {
293 struct app_mempool_params *p = &app->mempool_params[i];
295 APP_LOG(app, HIGH, "Initializing %s ...", p->name);
296 app->mempool[i] = rte_pktmbuf_pool_create(
302 sizeof(struct rte_mbuf), /* mbuf data size */
305 if (app->mempool[i] == NULL)
306 rte_panic("%s init error\n", p->name);
/* Install an ethertype filter steering ARP frames on this link to the
 * configured ARP queue. Returns the rte_eth_dev_filter_ctrl() status. */
311 app_link_filter_arp_add(struct app_link_params *link)
313 struct rte_eth_ethertype_filter filter = {
314 .ether_type = ETHER_TYPE_ARP,
316 .queue = link->arp_q,
319 return rte_eth_dev_filter_ctrl(link->pmd_id,
320 RTE_ETH_FILTER_ETHERTYPE,
/* Install a SYN filter steering TCP SYN packets on this link to the
 * configured TCP SYN queue. Returns the filter-control status. */
326 app_link_filter_tcp_syn_add(struct app_link_params *link)
328 struct rte_eth_syn_filter filter = {
330 .queue = link->tcp_syn_q,
333 return rte_eth_dev_filter_ctrl(link->pmd_id,
/* Add a 5-tuple (ntuple) filter on link l1 matching only the destination
 * IP of link l2 (all other tuple fields masked off), steering matches to
 * l1's local-IP queue at the lowest priority. */
340 app_link_filter_ip_add(struct app_link_params *l1, struct app_link_params *l2)
342 struct rte_eth_ntuple_filter filter = {
343 .flags = RTE_5TUPLE_FLAGS,
344 .dst_ip = rte_bswap32(l2->ip),
345 .dst_ip_mask = UINT32_MAX, /* Enable */
347 .src_ip_mask = 0, /* Disable */
349 .dst_port_mask = 0, /* Disable */
351 .src_port_mask = 0, /* Disable */
353 .proto_mask = 0, /* Disable */
355 .priority = 1, /* Lowest */
356 .queue = l1->ip_local_q,
359 return rte_eth_dev_filter_ctrl(l1->pmd_id,
360 RTE_ETH_FILTER_NTUPLE,
/* Delete the 5-tuple IP filter previously added by app_link_filter_ip_add();
 * the filter spec must match the one used at add time. */
366 app_link_filter_ip_del(struct app_link_params *l1, struct app_link_params *l2)
368 struct rte_eth_ntuple_filter filter = {
369 .flags = RTE_5TUPLE_FLAGS,
370 .dst_ip = rte_bswap32(l2->ip),
371 .dst_ip_mask = UINT32_MAX, /* Enable */
373 .src_ip_mask = 0, /* Disable */
375 .dst_port_mask = 0, /* Disable */
377 .src_port_mask = 0, /* Disable */
379 .proto_mask = 0, /* Disable */
381 .priority = 1, /* Lowest */
382 .queue = l1->ip_local_q,
385 return rte_eth_dev_filter_ctrl(l1->pmd_id,
386 RTE_ETH_FILTER_NTUPLE,
387 RTE_ETH_FILTER_DELETE,
/* Add a 5-tuple filter on l1 matching TCP packets destined to l2's IP,
 * steering them to l1's local TCP queue at higher priority than the
 * plain IP filter. */
392 app_link_filter_tcp_add(struct app_link_params *l1, struct app_link_params *l2)
394 struct rte_eth_ntuple_filter filter = {
395 .flags = RTE_5TUPLE_FLAGS,
396 .dst_ip = rte_bswap32(l2->ip),
397 .dst_ip_mask = UINT32_MAX, /* Enable */
399 .src_ip_mask = 0, /* Disable */
401 .dst_port_mask = 0, /* Disable */
403 .src_port_mask = 0, /* Disable */
404 .proto = IPPROTO_TCP,
405 .proto_mask = UINT8_MAX, /* Enable */
407 .priority = 2, /* Higher priority than IP */
408 .queue = l1->tcp_local_q,
411 return rte_eth_dev_filter_ctrl(l1->pmd_id,
412 RTE_ETH_FILTER_NTUPLE,
/* Delete the TCP 5-tuple filter previously added by
 * app_link_filter_tcp_add() (same matching spec, DELETE operation). */
418 app_link_filter_tcp_del(struct app_link_params *l1, struct app_link_params *l2)
420 struct rte_eth_ntuple_filter filter = {
421 .flags = RTE_5TUPLE_FLAGS,
422 .dst_ip = rte_bswap32(l2->ip),
423 .dst_ip_mask = UINT32_MAX, /* Enable */
425 .src_ip_mask = 0, /* Disable */
427 .dst_port_mask = 0, /* Disable */
429 .src_port_mask = 0, /* Disable */
430 .proto = IPPROTO_TCP,
431 .proto_mask = UINT8_MAX, /* Enable */
433 .priority = 2, /* Higher priority than IP */
434 .queue = l1->tcp_local_q,
437 return rte_eth_dev_filter_ctrl(l1->pmd_id,
438 RTE_ETH_FILTER_NTUPLE,
439 RTE_ETH_FILTER_DELETE,
/* Add a 5-tuple filter on l1 matching UDP packets destined to l2's IP,
 * steering them to l1's local UDP queue. */
444 app_link_filter_udp_add(struct app_link_params *l1, struct app_link_params *l2)
446 struct rte_eth_ntuple_filter filter = {
447 .flags = RTE_5TUPLE_FLAGS,
448 .dst_ip = rte_bswap32(l2->ip),
449 .dst_ip_mask = UINT32_MAX, /* Enable */
451 .src_ip_mask = 0, /* Disable */
453 .dst_port_mask = 0, /* Disable */
455 .src_port_mask = 0, /* Disable */
456 .proto = IPPROTO_UDP,
457 .proto_mask = UINT8_MAX, /* Enable */
459 .priority = 2, /* Higher priority than IP */
460 .queue = l1->udp_local_q,
463 return rte_eth_dev_filter_ctrl(l1->pmd_id,
464 RTE_ETH_FILTER_NTUPLE,
/* Delete the UDP 5-tuple filter previously added by
 * app_link_filter_udp_add(). */
470 app_link_filter_udp_del(struct app_link_params *l1, struct app_link_params *l2)
472 struct rte_eth_ntuple_filter filter = {
473 .flags = RTE_5TUPLE_FLAGS,
474 .dst_ip = rte_bswap32(l2->ip),
475 .dst_ip_mask = UINT32_MAX, /* Enable */
477 .src_ip_mask = 0, /* Disable */
479 .dst_port_mask = 0, /* Disable */
481 .src_port_mask = 0, /* Disable */
482 .proto = IPPROTO_UDP,
483 .proto_mask = UINT8_MAX, /* Enable */
485 .priority = 2, /* Higher priority than IP */
486 .queue = l1->udp_local_q,
489 return rte_eth_dev_filter_ctrl(l1->pmd_id,
490 RTE_ETH_FILTER_NTUPLE,
491 RTE_ETH_FILTER_DELETE,
/* Add a 5-tuple filter on l1 matching SCTP packets destined to l2's IP,
 * steering them to l1's local SCTP queue. */
496 app_link_filter_sctp_add(struct app_link_params *l1, struct app_link_params *l2)
498 struct rte_eth_ntuple_filter filter = {
499 .flags = RTE_5TUPLE_FLAGS,
500 .dst_ip = rte_bswap32(l2->ip),
501 .dst_ip_mask = UINT32_MAX, /* Enable */
503 .src_ip_mask = 0, /* Disable */
505 .dst_port_mask = 0, /* Disable */
507 .src_port_mask = 0, /* Disable */
508 .proto = IPPROTO_SCTP,
509 .proto_mask = UINT8_MAX, /* Enable */
511 .priority = 2, /* Higher priority than IP */
512 .queue = l1->sctp_local_q,
515 return rte_eth_dev_filter_ctrl(l1->pmd_id,
516 RTE_ETH_FILTER_NTUPLE,
/* Delete the SCTP 5-tuple filter previously added by
 * app_link_filter_sctp_add(). */
522 app_link_filter_sctp_del(struct app_link_params *l1, struct app_link_params *l2)
524 struct rte_eth_ntuple_filter filter = {
525 .flags = RTE_5TUPLE_FLAGS,
526 .dst_ip = rte_bswap32(l2->ip),
527 .dst_ip_mask = UINT32_MAX, /* Enable */
529 .src_ip_mask = 0, /* Disable */
531 .dst_port_mask = 0, /* Disable */
533 .src_port_mask = 0, /* Disable */
534 .proto = IPPROTO_SCTP,
535 .proto_mask = UINT8_MAX, /* Enable */
537 .priority = 2, /* Higher priority than IP */
538 .queue = l1->sctp_local_q,
541 return rte_eth_dev_filter_ctrl(l1->pmd_id,
542 RTE_ETH_FILTER_NTUPLE,
543 RTE_ETH_FILTER_DELETE,
/* If an ARP queue is configured (arp_q != 0), install the ARP filter on
 * this link and panic on failure; otherwise do nothing. */
548 app_link_set_arp_filter(struct app_params *app, struct app_link_params *cp)
550 if (cp->arp_q != 0) {
551 int status = app_link_filter_arp_add(cp);
553 APP_LOG(app, LOW, "%s (%" PRIu32 "): "
554 "Adding ARP filter (queue = %" PRIu32 ")",
555 cp->name, cp->pmd_id, cp->arp_q);
558 rte_panic("%s (%" PRIu32 "): "
559 "Error adding ARP filter "
560 "(queue = %" PRIu32 ") (%" PRId32 ")\n",
561 cp->name, cp->pmd_id, cp->arp_q, status);
/* If a TCP SYN queue is configured (tcp_syn_q != 0), install the SYN
 * filter on this link and panic on failure; otherwise do nothing. */
566 app_link_set_tcp_syn_filter(struct app_params *app, struct app_link_params *cp)
568 if (cp->tcp_syn_q != 0) {
569 int status = app_link_filter_tcp_syn_add(cp);
571 APP_LOG(app, LOW, "%s (%" PRIu32 "): "
572 "Adding TCP SYN filter (queue = %" PRIu32 ")",
573 cp->name, cp->pmd_id, cp->tcp_syn_q);
576 rte_panic("%s (%" PRIu32 "): "
577 "Error adding TCP SYN filter "
578 "(queue = %" PRIu32 ") (%" PRId32 ")\n",
579 cp->name, cp->pmd_id, cp->tcp_syn_q,
/* Bring link cp up: on every configured link, add per-protocol ntuple
 * filters (IP/TCP/UDP/SCTP local queues, where enabled) matching cp's IP,
 * then set the PMD link up and mark the link state as UP. Panics on any
 * filter or link-up error, except -ENOTSUP from the PMD. */
585 app_link_up_internal(struct app_params *app, struct app_link_params *cp)
590 /* For each link, add filters for IP of current link */
592 for (i = 0; i < app->n_links; i++) {
593 struct app_link_params *p = &app->link_params[i];
/* Local-IP queue: plain IP filter. */
596 if (p->ip_local_q != 0) {
597 int status = app_link_filter_ip_add(p, cp);
599 APP_LOG(app, LOW, "%s (%" PRIu32 "): "
600 "Adding IP filter (queue= %" PRIu32
601 ", IP = 0x%08" PRIx32 ")",
602 p->name, p->pmd_id, p->ip_local_q,
606 rte_panic("%s (%" PRIu32 "): "
608 "filter (queue= %" PRIu32 ", "
612 p->ip_local_q, cp->ip, status);
/* Local TCP queue: TCP-specific filter. */
616 if (p->tcp_local_q != 0) {
617 int status = app_link_filter_tcp_add(p, cp);
619 APP_LOG(app, LOW, "%s (%" PRIu32 "): "
622 ", IP = 0x%08" PRIx32 ")",
623 p->name, p->pmd_id, p->tcp_local_q,
627 rte_panic("%s (%" PRIu32 "): "
629 "filter (queue = %" PRIu32 ", "
633 p->tcp_local_q, cp->ip, status);
/* Local UDP queue: UDP-specific filter. */
637 if (p->udp_local_q != 0) {
638 int status = app_link_filter_udp_add(p, cp);
640 APP_LOG(app, LOW, "%s (%" PRIu32 "): "
643 ", IP = 0x%08" PRIx32 ")",
644 p->name, p->pmd_id, p->udp_local_q,
648 rte_panic("%s (%" PRIu32 "): "
650 "filter (queue = %" PRIu32 ", "
654 p->udp_local_q, cp->ip, status);
/* Local SCTP queue: SCTP-specific filter. */
658 if (p->sctp_local_q != 0) {
659 int status = app_link_filter_sctp_add(p, cp);
661 APP_LOG(app, LOW, "%s (%" PRIu32
662 "): Adding SCTP filter "
664 ", IP = 0x%08" PRIx32 ")",
665 p->name, p->pmd_id, p->sctp_local_q,
669 rte_panic("%s (%" PRIu32 "): "
671 "filter (queue = %" PRIu32 ", "
675 p->sctp_local_q, cp->ip,
682 status = rte_eth_dev_set_link_up(cp->pmd_id);
683 /* Do not panic if PMD does not provide link up functionality */
684 if (status < 0 && status != -ENOTSUP)
685 rte_panic("%s (%" PRIu32 "): PMD set link up error %"
686 PRId32 "\n", cp->name, cp->pmd_id, status);
688 /* Mark link as UP */
/* Bring link cp down: set the PMD link down (ignoring -ENOTSUP), mark the
 * link state DOWN, and — mirror of app_link_up_internal() — remove the
 * per-protocol filters matching cp's IP from every link. Panics on any
 * filter-delete error. */
693 app_link_down_internal(struct app_params *app, struct app_link_params *cp)
699 status = rte_eth_dev_set_link_down(cp->pmd_id);
700 /* Do not panic if PMD does not provide link down functionality */
701 if (status < 0 && status != -ENOTSUP)
702 rte_panic("%s (%" PRIu32 "): PMD set link down error %"
703 PRId32 "\n", cp->name, cp->pmd_id, status);
705 /* Mark link as DOWN */
708 /* Return if current link IP is not valid */
712 /* For each link, remove filters for IP of current link */
713 for (i = 0; i < app->n_links; i++) {
714 struct app_link_params *p = &app->link_params[i];
717 if (p->ip_local_q != 0) {
718 int status = app_link_filter_ip_del(p, cp);
720 APP_LOG(app, LOW, "%s (%" PRIu32
721 "): Deleting IP filter "
722 "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")",
723 p->name, p->pmd_id, p->ip_local_q, cp->ip);
726 rte_panic("%s (%" PRIu32
727 "): Error deleting IP filter "
731 p->name, p->pmd_id, p->ip_local_q,
736 if (p->tcp_local_q != 0) {
737 int status = app_link_filter_tcp_del(p, cp);
739 APP_LOG(app, LOW, "%s (%" PRIu32
740 "): Deleting TCP filter "
742 ", IP = 0x%" PRIx32 ")",
743 p->name, p->pmd_id, p->tcp_local_q, cp->ip);
746 rte_panic("%s (%" PRIu32
747 "): Error deleting TCP filter "
751 p->name, p->pmd_id, p->tcp_local_q,
756 if (p->udp_local_q != 0) {
757 int status = app_link_filter_udp_del(p, cp);
759 APP_LOG(app, LOW, "%s (%" PRIu32
760 "): Deleting UDP filter "
761 "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")",
762 p->name, p->pmd_id, p->udp_local_q, cp->ip);
765 rte_panic("%s (%" PRIu32
766 "): Error deleting UDP filter "
770 p->name, p->pmd_id, p->udp_local_q,
775 if (p->sctp_local_q != 0) {
776 int status = app_link_filter_sctp_del(p, cp);
778 APP_LOG(app, LOW, "%s (%" PRIu32
779 "): Deleting SCTP filter "
781 ", IP = 0x%" PRIx32 ")",
782 p->name, p->pmd_id, p->sctp_local_q, cp->ip);
785 rte_panic("%s (%" PRIu32
786 "): Error deleting SCTP filter "
790 p->name, p->pmd_id, p->sctp_local_q,
/* Query and log the Ethernet link status of every configured link;
 * panics if any link reports DOWN. */
797 app_check_link(struct app_params *app)
799 uint32_t all_links_up, i;
803 for (i = 0; i < app->n_links; i++) {
804 struct app_link_params *p = &app->link_params[i];
805 struct rte_eth_link link_params;
807 memset(&link_params, 0, sizeof(link_params));
808 rte_eth_link_get(p->pmd_id, &link_params);
/* link_speed is reported in Mbps; log it in Gbps. */
810 APP_LOG(app, HIGH, "%s (%" PRIu32 ") (%" PRIu32 " Gbps) %s",
813 link_params.link_speed / 1000,
814 link_params.link_status ? "UP" : "DOWN");
816 if (link_params.link_status == ETH_LINK_DOWN)
820 if (all_links_up == 0)
821 rte_panic("Some links are DOWN\n");
/* Return whether any software queue has IPv4/IPv6 fragmentation or
 * reassembly enabled (used to decide TX multi-segment offload). */
825 is_any_swq_frag_or_ras(struct app_params *app)
829 for (i = 0; i < app->n_pktq_swq; i++) {
830 struct app_pktq_swq_params *p = &app->swq_params[i];
832 if ((p->ipv4_frag == 1) || (p->ipv6_frag == 1) ||
833 (p->ipv4_ras == 1) || (p->ipv6_ras == 1))
/* If any SWQ does fragmentation/reassembly, enable the multi-segment
 * TX offload on every link so fragmented mbuf chains can be transmitted. */
841 app_init_link_frag_ras(struct app_params *app)
845 if (is_any_swq_frag_or_ras(app)) {
846 for (i = 0; i < app->n_links; i++) {
847 struct app_link_params *p_link = &app->link_params[i];
848 p_link->conf.txmode.offloads |=
849 DEV_TX_OFFLOAD_MULTI_SEGS;
/* Return the NUMA socket of the given port, falling back to socket 0
 * when the PMD reports SOCKET_ID_ANY. */
855 app_get_cpu_socket_id(uint32_t pmd_id)
857 int status = rte_eth_dev_socket_id(pmd_id);
859 return (status != SOCKET_ID_ANY) ? status : 0;
/* RSS is considered enabled for a link iff it has at least one RSS queue. */
863 app_link_rss_enabled(struct app_link_params *cp)
865 return (cp->n_rss_qs) ? 1 : 0;
/* Program the device's RSS redirection table (RETA), distributing all
 * RETA entries round-robin over the link's configured RSS queues.
 * Panics if the RETA size is zero, exceeds ETH_RSS_RETA_SIZE_512, or the
 * update fails. */
869 app_link_rss_setup(struct app_link_params *cp)
871 struct rte_eth_dev_info dev_info;
872 struct rte_eth_rss_reta_entry64 reta_conf[APP_RETA_SIZE_MAX];
877 memset(&dev_info, 0, sizeof(dev_info));
878 rte_eth_dev_info_get(cp->pmd_id, &dev_info);
880 if (dev_info.reta_size == 0)
881 rte_panic("%s (%u): RSS setup error (null RETA size)\n",
882 cp->name, cp->pmd_id);
884 if (dev_info.reta_size > ETH_RSS_RETA_SIZE_512)
885 rte_panic("%s (%u): RSS setup error (RETA size too big)\n",
886 cp->name, cp->pmd_id);
888 /* Setup RETA contents */
889 memset(reta_conf, 0, sizeof(reta_conf));
/* Mark every entry of each 64-entry RETA group as valid for update. */
891 for (i = 0; i < dev_info.reta_size; i++)
892 reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
894 for (i = 0; i < dev_info.reta_size; i++) {
895 uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
896 uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
/* Round-robin assignment across the configured RSS queues. */
897 uint32_t rss_qs_pos = i % cp->n_rss_qs;
899 reta_conf[reta_id].reta[reta_pos] =
900 (uint16_t) cp->rss_qs[rss_qs_pos];
904 status = rte_eth_dev_rss_reta_update(cp->pmd_id,
908 rte_panic("%s (%u): RSS setup error (RETA update failed)\n",
909 cp->name, cp->pmd_id);
/* Set per-link ethdev configuration: RSS multi-queue RX mode with the
 * hash functions assembled from the link's configured RSS protocols. */
913 app_init_link_set_config(struct app_link_params *p)
916 p->conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
917 p->conf.rx_adv_conf.rss_conf.rss_hf = p->rss_proto_ipv4 |
/* Initialize every configured link: apply frag/RAS TX offloads, configure
 * the ethdev (RX/TX queue counts, offloads), read the MAC address, enable
 * promiscuous mode, set up all matching RX and TX queues, start the port,
 * install ARP/TCP-SYN filters, program RSS, and bring the link up.
 * Finishes with a global link-status check. Panics on any setup error. */
924 app_init_link(struct app_params *app)
928 app_init_link_frag_ras(app);
930 for (i = 0; i < app->n_links; i++) {
931 struct app_link_params *p_link = &app->link_params[i];
932 struct rte_eth_dev_info dev_info;
933 uint32_t link_id, n_hwq_in, n_hwq_out, j;
/* Link id is encoded in the link name ("LINK<n>"). */
936 sscanf(p_link->name, "LINK%" PRIu32, &link_id);
937 n_hwq_in = app_link_get_n_rxq(app, p_link);
938 n_hwq_out = app_link_get_n_txq(app, p_link);
939 app_init_link_set_config(p_link);
941 APP_LOG(app, HIGH, "Initializing %s (%" PRIu32") "
942 "(%" PRIu32 " RXQ, %" PRIu32 " TXQ) ...",
/* Enable fast mbuf free on TX if the PMD supports it. */
949 rte_eth_dev_info_get(p_link->pmd_id, &dev_info);
950 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
951 p_link->conf.txmode.offloads |=
952 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
953 status = rte_eth_dev_configure(
959 rte_panic("%s (%" PRId32 "): "
960 "init error (%" PRId32 ")\n",
961 p_link->name, p_link->pmd_id, status);
963 rte_eth_macaddr_get(p_link->pmd_id,
964 (struct ether_addr *) &p_link->mac_addr);
967 rte_eth_promiscuous_enable(p_link->pmd_id);
/* RX queue setup: only RXQs whose name ("RXQ<link>.<queue>") matches
 * this link id are configured here. */
970 for (j = 0; j < app->n_pktq_hwq_in; j++) {
971 struct app_pktq_hwq_in_params *p_rxq =
972 &app->hwq_in_params[j];
973 uint32_t rxq_link_id, rxq_queue_id;
974 uint16_t nb_rxd = p_rxq->size;
976 sscanf(p_rxq->name, "RXQ%" PRIu32 ".%" PRIu32,
977 &rxq_link_id, &rxq_queue_id);
978 if (rxq_link_id != link_id)
/* Let the PMD clamp the descriptor count to its limits. */
981 status = rte_eth_dev_adjust_nb_rx_tx_desc(
986 rte_panic("%s (%" PRIu32 "): "
987 "%s adjust number of Rx descriptors "
988 "error (%" PRId32 ")\n",
994 p_rxq->conf.offloads = p_link->conf.rxmode.offloads;
995 status = rte_eth_rx_queue_setup(
999 app_get_cpu_socket_id(p_link->pmd_id),
1001 app->mempool[p_rxq->mempool_id]);
1003 rte_panic("%s (%" PRIu32 "): "
1004 "%s init error (%" PRId32 ")\n",
/* TX queue setup: symmetric to RX, keyed on "TXQ<link>.<queue>". */
1012 for (j = 0; j < app->n_pktq_hwq_out; j++) {
1013 struct app_pktq_hwq_out_params *p_txq =
1014 &app->hwq_out_params[j];
1015 uint32_t txq_link_id, txq_queue_id;
1016 uint16_t nb_txd = p_txq->size;
1018 sscanf(p_txq->name, "TXQ%" PRIu32 ".%" PRIu32,
1019 &txq_link_id, &txq_queue_id);
1020 if (txq_link_id != link_id)
1023 status = rte_eth_dev_adjust_nb_rx_tx_desc(
1028 rte_panic("%s (%" PRIu32 "): "
1029 "%s adjust number of Tx descriptors "
1030 "error (%" PRId32 ")\n",
1036 p_txq->conf.offloads = p_link->conf.txmode.offloads;
1037 status = rte_eth_tx_queue_setup(
1041 app_get_cpu_socket_id(p_link->pmd_id),
1044 rte_panic("%s (%" PRIu32 "): "
1045 "%s init error (%" PRId32 ")\n",
1053 status = rte_eth_dev_start(p_link->pmd_id);
1055 rte_panic("Cannot start %s (error %" PRId32 ")\n",
1056 p_link->name, status);
1059 app_link_set_arp_filter(app, p_link);
1060 app_link_set_tcp_syn_filter(app, p_link);
1061 if (app_link_rss_enabled(p_link))
1062 app_link_rss_setup(p_link);
1065 app_link_up_internal(app, p_link);
/* Verify all links came up; panics otherwise. */
1068 app_check_link(app);
/* Create one rte_ring per software queue. Single-consumer/single-producer
 * flags are set when exactly one reader/writer is attached, enabling the
 * faster SC/SP ring paths. Panics on creation failure. */
1072 app_init_swq(struct app_params *app)
1076 for (i = 0; i < app->n_pktq_swq; i++) {
1077 struct app_pktq_swq_params *p = &app->swq_params[i];
1080 if (app_swq_get_readers(app, p) == 1)
1081 flags |= RING_F_SC_DEQ;
1082 if (app_swq_get_writers(app, p) == 1)
1083 flags |= RING_F_SP_ENQ;
1085 APP_LOG(app, HIGH, "Initializing %s...", p->name);
1086 app->swq[i] = rte_ring_create(
1092 if (app->swq[i] == NULL)
1093 rte_panic("%s init error\n", p->name);
/* Initialize each traffic-manager (hierarchical scheduler) instance:
 * derive the port rate from the attached link's speed, create the sched
 * port, then configure every subport and each pipe that has a profile
 * assigned. Panics on any configuration error. */
1098 app_init_tm(struct app_params *app)
1102 for (i = 0; i < app->n_pktq_tm; i++) {
1103 struct app_pktq_tm_params *p_tm = &app->tm_params[i];
1104 struct app_link_params *p_link;
1105 struct rte_eth_link link_eth_params;
1106 struct rte_sched_port *sched;
1107 uint32_t n_subports, subport_id;
1110 p_link = app_get_link_for_tm(app, p_tm);
1112 rte_eth_link_get(p_link->pmd_id, &link_eth_params);
1115 p_tm->sched_port_params.name = p_tm->name;
1116 p_tm->sched_port_params.socket =
1117 app_get_cpu_socket_id(p_link->pmd_id);
/* link_speed is in Mbps; sched rate is in bytes/second. */
1118 p_tm->sched_port_params.rate =
1119 (uint64_t) link_eth_params.link_speed * 1000 * 1000 / 8;
1121 APP_LOG(app, HIGH, "Initializing %s ...", p_tm->name);
1122 sched = rte_sched_port_config(&p_tm->sched_port_params);
1124 rte_panic("%s init error\n", p_tm->name);
1128 n_subports = p_tm->sched_port_params.n_subports_per_port;
1129 for (subport_id = 0; subport_id < n_subports; subport_id++) {
1130 uint32_t n_pipes_per_subport, pipe_id;
1132 status = rte_sched_subport_config(sched,
1134 &p_tm->sched_subport_params[subport_id]);
1136 rte_panic("%s subport %" PRIu32
1137 " init error (%" PRId32 ")\n",
1138 p_tm->name, subport_id, status);
1141 n_pipes_per_subport =
1142 p_tm->sched_port_params.n_pipes_per_subport;
1144 pipe_id < n_pipes_per_subport;
/* Profile id -1 means the pipe is unconfigured — skip it. */
1146 int profile_id = p_tm->sched_pipe_to_profile[
1147 subport_id * APP_MAX_SCHED_PIPES +
1150 if (profile_id == -1)
1153 status = rte_sched_pipe_config(sched,
1158 rte_panic("%s subport %" PRIu32
1160 " (profile %" PRId32 ") "
1161 "init error (% " PRId32 ")\n",
1162 p_tm->name, subport_id, pipe_id,
1163 profile_id, status);
/* TAP device initialization. Two variants selected at compile time:
 * - non-Linux EAL: panic if any TAP queue is configured (unsupported);
 * - Linux EAL: open /dev/net/tun (non-blocking) and create a TAP
 *   interface named after the config entry via TUNSETIFF. */
1169 #ifndef RTE_EXEC_ENV_LINUXAPP
1171 app_init_tap(struct app_params *app) {
1172 if (app->n_pktq_tap == 0)
1175 rte_panic("TAP device not supported.\n");
1179 app_init_tap(struct app_params *app)
1183 for (i = 0; i < app->n_pktq_tap; i++) {
1184 struct app_pktq_tap_params *p_tap = &app->tap_params[i];
1188 APP_LOG(app, HIGH, "Initializing %s ...", p_tap->name);
1190 fd = open("/dev/net/tun", O_RDWR | O_NONBLOCK);
1192 rte_panic("Cannot open file /dev/net/tun\n");
1194 memset(&ifr, 0, sizeof(ifr));
1195 ifr.ifr_flags = IFF_TAP | IFF_NO_PI; /* No packet information */
1196 snprintf(ifr.ifr_name, IFNAMSIZ, "%s", p_tap->name);
1198 status = ioctl(fd, TUNSETIFF, (void *) &ifr);
1200 rte_panic("TAP setup error\n");
/* KNI kernel-request callbacks (compiled only with librte_kni). */
1207 #ifdef RTE_LIBRTE_KNI
/* Callback for kernel "interface up/down" requests: validates the port id
 * and forwards to the corresponding ethdev link up/down call. */
1209 kni_config_network_interface(uint16_t port_id, uint8_t if_up) {
1212 if (port_id >= rte_eth_dev_count())
1216 rte_eth_dev_set_link_up(port_id) :
1217 rte_eth_dev_set_link_down(port_id);
/* Callback for kernel MTU-change requests: validates the port id,
 * rejects MTUs above ETHER_MAX_LEN, then applies via the ethdev API. */
1223 kni_change_mtu(uint16_t port_id, unsigned int new_mtu) {
1226 if (port_id >= rte_eth_dev_count())
1229 if (new_mtu > ETHER_MAX_LEN)
1233 ret = rte_eth_dev_set_mtu(port_id, new_mtu);
1239 #endif /* RTE_LIBRTE_KNI */
/* KNI initialization. Two variants selected at compile time:
 * - without librte_kni: panic if any KNI queue is configured;
 * - with librte_kni: allocate one KNI device per configured queue, bound
 *   to its link's PCI identity and serviced by the callbacks above. */
1241 #ifndef RTE_LIBRTE_KNI
1243 app_init_kni(struct app_params *app) {
1244 if (app->n_pktq_kni == 0)
1247 rte_panic("Can not init KNI without librte_kni support.\n");
1251 app_init_kni(struct app_params *app) {
1254 if (app->n_pktq_kni == 0)
1257 rte_kni_init(app->n_pktq_kni);
1259 for (i = 0; i < app->n_pktq_kni; i++) {
1260 struct app_pktq_kni_params *p_kni = &app->kni_params[i];
1261 struct app_link_params *p_link;
1262 struct rte_eth_dev_info dev_info;
1263 struct app_mempool_params *mempool_params;
1264 struct rte_mempool *mempool;
1265 struct rte_kni_conf conf;
1266 struct rte_kni_ops ops;
1269 p_link = app_get_link_for_kni(app, p_kni);
1270 memset(&dev_info, 0, sizeof(dev_info));
1271 rte_eth_dev_info_get(p_link->pmd_id, &dev_info);
1274 mempool_params = &app->mempool_params[p_kni->mempool_id];
1275 mempool = app->mempool[p_kni->mempool_id];
1278 memset(&conf, 0, sizeof(conf));
1279 snprintf(conf.name, RTE_KNI_NAMESIZE, "%s", p_kni->name);
/* Optionally pin the KNI kernel thread to a specific lcore. */
1280 conf.force_bind = p_kni->force_bind;
1281 if (conf.force_bind) {
1284 lcore_id = cpu_core_map_get_lcore_id(app->core_map,
1287 p_kni->hyper_th_id);
1290 rte_panic("%s invalid CPU core\n", p_kni->name);
1292 conf.core_id = (uint32_t) lcore_id;
1294 conf.group_id = p_link->pmd_id;
1295 conf.mbuf_size = mempool_params->buffer_size;
/* NOTE(review): dev_info.pci_dev is dereferenced without a NULL check —
 * assumes the link is PCI-backed; confirm against full source. */
1296 conf.addr = dev_info.pci_dev->addr;
1297 conf.id = dev_info.pci_dev->id;
1299 memset(&ops, 0, sizeof(ops));
1300 ops.port_id = (uint8_t) p_link->pmd_id;
1301 ops.change_mtu = kni_change_mtu;
1302 ops.config_network_if = kni_config_network_interface;
1304 APP_LOG(app, HIGH, "Initializing %s ...", p_kni->name);
1305 app->kni[i] = rte_kni_alloc(mempool, &conf, &ops);
1307 rte_panic("%s init error\n", p_kni->name);
1310 #endif /* RTE_LIBRTE_KNI */
/* Create one single-producer/single-consumer rte_ring per message queue
 * (control-plane messaging); panics on creation failure. */
1313 app_init_msgq(struct app_params *app)
1317 for (i = 0; i < app->n_msgq; i++) {
1318 struct app_msgq_params *p = &app->msgq_params[i];
1320 APP_LOG(app, HIGH, "Initializing %s ...", p->name);
1321 app->msgq[i] = rte_ring_create(
1325 RING_F_SP_ENQ | RING_F_SC_DEQ);
1327 if (app->msgq[i] == NULL)
1328 rte_panic("%s init error\n", p->name);
1332 void app_pipeline_params_get(struct app_params *app,
1333 struct app_pipeline_params *p_in,
1334 struct pipeline_params *p_out)
1338 snprintf(p_out->name, PIPELINE_NAME_SIZE, "%s", p_in->name);
1340 snprintf(p_out->type, PIPELINE_TYPE_SIZE, "%s", p_in->type);
1342 p_out->socket_id = (int) p_in->socket_id;
1344 p_out->log_level = app->log_level;
1347 p_out->n_ports_in = p_in->n_pktq_in;
1348 for (i = 0; i < p_in->n_pktq_in; i++) {
1349 struct app_pktq_in_params *in = &p_in->pktq_in[i];
1350 struct pipeline_port_in_params *out = &p_out->port_in[i];
1353 case APP_PKTQ_IN_HWQ:
1355 struct app_pktq_hwq_in_params *p_hwq_in =
1356 &app->hwq_in_params[in->id];
1357 struct app_link_params *p_link =
1358 app_get_link_for_rxq(app, p_hwq_in);
1359 uint32_t rxq_link_id, rxq_queue_id;
1361 sscanf(p_hwq_in->name, "RXQ%" SCNu32 ".%" SCNu32,
1365 out->type = PIPELINE_PORT_IN_ETHDEV_READER;
1366 out->params.ethdev.port_id = p_link->pmd_id;
1367 out->params.ethdev.queue_id = rxq_queue_id;
1368 out->burst_size = p_hwq_in->burst;
1371 case APP_PKTQ_IN_SWQ:
1373 struct app_pktq_swq_params *swq_params = &app->swq_params[in->id];
1375 if ((swq_params->ipv4_frag == 0) && (swq_params->ipv6_frag == 0)) {
1376 if (app_swq_get_readers(app, swq_params) == 1) {
1377 out->type = PIPELINE_PORT_IN_RING_READER;
1378 out->params.ring.ring = app->swq[in->id];
1379 out->burst_size = app->swq_params[in->id].burst_read;
1381 out->type = PIPELINE_PORT_IN_RING_MULTI_READER;
1382 out->params.ring_multi.ring = app->swq[in->id];
1383 out->burst_size = swq_params->burst_read;
1386 if (swq_params->ipv4_frag == 1) {
1387 struct rte_port_ring_reader_ipv4_frag_params *params =
1388 &out->params.ring_ipv4_frag;
1390 out->type = PIPELINE_PORT_IN_RING_READER_IPV4_FRAG;
1391 params->ring = app->swq[in->id];
1392 params->mtu = swq_params->mtu;
1393 params->metadata_size = swq_params->metadata_size;
1394 params->pool_direct =
1395 app->mempool[swq_params->mempool_direct_id];
1396 params->pool_indirect =
1397 app->mempool[swq_params->mempool_indirect_id];
1398 out->burst_size = swq_params->burst_read;
1400 struct rte_port_ring_reader_ipv6_frag_params *params =
1401 &out->params.ring_ipv6_frag;
1403 out->type = PIPELINE_PORT_IN_RING_READER_IPV6_FRAG;
1404 params->ring = app->swq[in->id];
1405 params->mtu = swq_params->mtu;
1406 params->metadata_size = swq_params->metadata_size;
1407 params->pool_direct =
1408 app->mempool[swq_params->mempool_direct_id];
1409 params->pool_indirect =
1410 app->mempool[swq_params->mempool_indirect_id];
1411 out->burst_size = swq_params->burst_read;
1416 case APP_PKTQ_IN_TM:
1418 out->type = PIPELINE_PORT_IN_SCHED_READER;
1419 out->params.sched.sched = app->tm[in->id];
1420 out->burst_size = app->tm_params[in->id].burst_read;
1423 #ifdef RTE_EXEC_ENV_LINUXAPP
1424 case APP_PKTQ_IN_TAP:
1426 struct app_pktq_tap_params *tap_params =
1427 &app->tap_params[in->id];
1428 struct app_mempool_params *mempool_params =
1429 &app->mempool_params[tap_params->mempool_id];
1430 struct rte_mempool *mempool =
1431 app->mempool[tap_params->mempool_id];
1433 out->type = PIPELINE_PORT_IN_FD_READER;
1434 out->params.fd.fd = app->tap[in->id];
1435 out->params.fd.mtu = mempool_params->buffer_size;
1436 out->params.fd.mempool = mempool;
1437 out->burst_size = app->tap_params[in->id].burst_read;
1441 #ifdef RTE_LIBRTE_KNI
1442 case APP_PKTQ_IN_KNI:
1444 out->type = PIPELINE_PORT_IN_KNI_READER;
1445 out->params.kni.kni = app->kni[in->id];
1446 out->burst_size = app->kni_params[in->id].burst_read;
1449 #endif /* RTE_LIBRTE_KNI */
1450 case APP_PKTQ_IN_SOURCE:
1452 uint32_t mempool_id =
1453 app->source_params[in->id].mempool_id;
1455 out->type = PIPELINE_PORT_IN_SOURCE;
1456 out->params.source.mempool = app->mempool[mempool_id];
1457 out->burst_size = app->source_params[in->id].burst;
1458 out->params.source.file_name =
1459 app->source_params[in->id].file_name;
1460 out->params.source.n_bytes_per_pkt =
1461 app->source_params[in->id].n_bytes_per_pkt;
1470 p_out->n_ports_out = p_in->n_pktq_out;
1471 for (i = 0; i < p_in->n_pktq_out; i++) {
1472 struct app_pktq_out_params *in = &p_in->pktq_out[i];
1473 struct pipeline_port_out_params *out = &p_out->port_out[i];
1476 case APP_PKTQ_OUT_HWQ:
1478 struct app_pktq_hwq_out_params *p_hwq_out =
1479 &app->hwq_out_params[in->id];
1480 struct app_link_params *p_link =
1481 app_get_link_for_txq(app, p_hwq_out);
1482 uint32_t txq_link_id, txq_queue_id;
1484 sscanf(p_hwq_out->name,
1485 "TXQ%" SCNu32 ".%" SCNu32,
1489 if (p_hwq_out->dropless == 0) {
1490 struct rte_port_ethdev_writer_params *params =
1491 &out->params.ethdev;
1493 out->type = PIPELINE_PORT_OUT_ETHDEV_WRITER;
1494 params->port_id = p_link->pmd_id;
1495 params->queue_id = txq_queue_id;
1496 params->tx_burst_sz =
1497 app->hwq_out_params[in->id].burst;
1499 struct rte_port_ethdev_writer_nodrop_params
1500 *params = &out->params.ethdev_nodrop;
1503 PIPELINE_PORT_OUT_ETHDEV_WRITER_NODROP;
1504 params->port_id = p_link->pmd_id;
1505 params->queue_id = txq_queue_id;
1506 params->tx_burst_sz = p_hwq_out->burst;
1507 params->n_retries = p_hwq_out->n_retries;
1511 case APP_PKTQ_OUT_SWQ:
1513 struct app_pktq_swq_params *swq_params = &app->swq_params[in->id];
1515 if ((swq_params->ipv4_ras == 0) && (swq_params->ipv6_ras == 0)) {
1516 if (app_swq_get_writers(app, swq_params) == 1) {
1517 if (app->swq_params[in->id].dropless == 0) {
1518 struct rte_port_ring_writer_params *params =
1521 out->type = PIPELINE_PORT_OUT_RING_WRITER;
1522 params->ring = app->swq[in->id];
1523 params->tx_burst_sz =
1524 app->swq_params[in->id].burst_write;
1526 struct rte_port_ring_writer_nodrop_params
1527 *params = &out->params.ring_nodrop;
1530 PIPELINE_PORT_OUT_RING_WRITER_NODROP;
1531 params->ring = app->swq[in->id];
1532 params->tx_burst_sz =
1533 app->swq_params[in->id].burst_write;
1535 app->swq_params[in->id].n_retries;
1538 if (swq_params->dropless == 0) {
1539 struct rte_port_ring_multi_writer_params *params =
1540 &out->params.ring_multi;
1542 out->type = PIPELINE_PORT_OUT_RING_MULTI_WRITER;
1543 params->ring = app->swq[in->id];
1544 params->tx_burst_sz = swq_params->burst_write;
1546 struct rte_port_ring_multi_writer_nodrop_params
1547 *params = &out->params.ring_multi_nodrop;
1549 out->type = PIPELINE_PORT_OUT_RING_MULTI_WRITER_NODROP;
1550 params->ring = app->swq[in->id];
1551 params->tx_burst_sz = swq_params->burst_write;
1552 params->n_retries = swq_params->n_retries;
1556 if (swq_params->ipv4_ras == 1) {
1557 struct rte_port_ring_writer_ipv4_ras_params *params =
1558 &out->params.ring_ipv4_ras;
1560 out->type = PIPELINE_PORT_OUT_RING_WRITER_IPV4_RAS;
1561 params->ring = app->swq[in->id];
1562 params->tx_burst_sz = swq_params->burst_write;
1564 struct rte_port_ring_writer_ipv6_ras_params *params =
1565 &out->params.ring_ipv6_ras;
1567 out->type = PIPELINE_PORT_OUT_RING_WRITER_IPV6_RAS;
1568 params->ring = app->swq[in->id];
1569 params->tx_burst_sz = swq_params->burst_write;
1574 case APP_PKTQ_OUT_TM:
1576 struct rte_port_sched_writer_params *params =
1579 out->type = PIPELINE_PORT_OUT_SCHED_WRITER;
1580 params->sched = app->tm[in->id];
1581 params->tx_burst_sz =
1582 app->tm_params[in->id].burst_write;
1585 #ifdef RTE_EXEC_ENV_LINUXAPP
1586 case APP_PKTQ_OUT_TAP:
1588 struct rte_port_fd_writer_params *params =
1591 out->type = PIPELINE_PORT_OUT_FD_WRITER;
1592 params->fd = app->tap[in->id];
1593 params->tx_burst_sz =
1594 app->tap_params[in->id].burst_write;
1598 #ifdef RTE_LIBRTE_KNI
1599 case APP_PKTQ_OUT_KNI:
1601 struct app_pktq_kni_params *p_kni =
1602 &app->kni_params[in->id];
1604 if (p_kni->dropless == 0) {
1605 struct rte_port_kni_writer_params *params =
1608 out->type = PIPELINE_PORT_OUT_KNI_WRITER;
1609 params->kni = app->kni[in->id];
1610 params->tx_burst_sz =
1611 app->kni_params[in->id].burst_write;
1613 struct rte_port_kni_writer_nodrop_params
1614 *params = &out->params.kni_nodrop;
1616 out->type = PIPELINE_PORT_OUT_KNI_WRITER_NODROP;
1617 params->kni = app->kni[in->id];
1618 params->tx_burst_sz =
1619 app->kni_params[in->id].burst_write;
1621 app->kni_params[in->id].n_retries;
1625 #endif /* RTE_LIBRTE_KNI */
1626 case APP_PKTQ_OUT_SINK:
1628 out->type = PIPELINE_PORT_OUT_SINK;
1629 out->params.sink.file_name =
1630 app->sink_params[in->id].file_name;
1631 out->params.sink.max_n_pkts =
1632 app->sink_params[in->id].
1643 p_out->n_msgq = p_in->n_msgq_in;
1645 for (i = 0; i < p_in->n_msgq_in; i++)
1646 p_out->msgq_in[i] = app->msgq[p_in->msgq_in[i]];
1648 for (i = 0; i < p_in->n_msgq_out; i++)
1649 p_out->msgq_out[i] = app->msgq[p_in->msgq_out[i]];
1652 p_out->n_args = p_in->n_args;
1653 for (i = 0; i < p_in->n_args; i++) {
1654 p_out->args_name[i] = p_in->args_name[i];
1655 p_out->args_value[i] = p_in->args_value[i];
/*
 * app_init_pipelines(): instantiate every pipeline listed in the
 * application configuration.  For each pipeline, the registered type is
 * looked up by name, the config is flattened into a struct
 * pipeline_params, and the type's back-end / front-end init hooks are
 * invoked.  Any failure is fatal (rte_panic), so this must run before
 * packet processing starts.
 *
 * NOTE(review): this extract elides several source lines (embedded line
 * numbers skip), so some braces and guard lines are not visible here.
 */
1660 app_init_pipelines(struct app_params *app)
1664 for (p_id = 0; p_id < app->n_pipelines; p_id++) {
1665 struct app_pipeline_params *params =
1666 &app->pipeline_params[p_id];
1667 struct app_pipeline_data *data = &app->pipeline_data[p_id];
1668 struct pipeline_type *ptype;
1669 struct pipeline_params pp;
1671 APP_LOG(app, HIGH, "Initializing %s ...", params->name);
/* Resolve the configured type name; unknown type is fatal. */
1673 ptype = app_pipeline_type_find(app, params->type);
1675 rte_panic("Init error: Unknown pipeline type \"%s\"\n",
/* Flatten app-level config into the pipeline_params handed to the hooks. */
1678 app_pipeline_params_get(app, params, &pp);
/* Back-end init is optional per type; NULL result is fatal. */
1682 if (ptype->be_ops->f_init) {
1683 data->be = ptype->be_ops->f_init(&pp, (void *) app);
1685 if (data->be == NULL)
1686 rte_panic("Pipeline instance \"%s\" back-end "
1687 "init error\n", params->name);
/* Front-end init is likewise optional; NULL result is fatal. */
1692 if (ptype->fe_ops->f_init) {
1693 data->fe = ptype->fe_ops->f_init(&pp, (void *) app);
1695 if (data->fe == NULL)
1696 rte_panic("Pipeline instance \"%s\" front-end "
1697 "init error\n", params->name);
1700 data->ptype = ptype;
/* Convert the configured timer period from milliseconds to TSC cycles. */
1702 data->timer_period = (rte_get_tsc_hz() *
1703 params->timer_period) / 1000;
/*
 * app_post_init_pipelines(): run the optional front-end post-init
 * callback of every pipeline type.  Pipelines whose type does not
 * provide f_post_init are skipped; a failing callback is fatal.
 *
 * NOTE(review): the skip ("continue") and status-check lines are elided
 * in this extract — confirm against the full source.
 */
1708 app_post_init_pipelines(struct app_params *app)
1712 for (p_id = 0; p_id < app->n_pipelines; p_id++) {
1713 struct app_pipeline_params *params =
1714 &app->pipeline_params[p_id];
1715 struct app_pipeline_data *data = &app->pipeline_data[p_id];
/* f_post_init is optional per pipeline type. */
1718 if (data->ptype->fe_ops->f_post_init == NULL)
1721 status = data->ptype->fe_ops->f_post_init(data->fe);
1723 rte_panic("Pipeline instance \"%s\" front-end "
1724 "post-init error\n", params->name);
/*
 * app_init_threads(): bind every pipeline to the lcore thread that will
 * run it.  For each pipeline, the configured (socket, core, hyperthread)
 * triple is translated to a physical lcore id, the per-thread bookkeeping
 * (timer deadlines, headroom counters, request/response message queues)
 * is initialized, and the pipeline is appended to the thread's "regular"
 * list (types without a custom f_run) or "custom" list (types with one).
 * All lookup failures are fatal (rte_panic).
 *
 * NOTE(review): several lines are elided in this extract (e.g. the
 * lcore_id validity check and the n_regular/n_custom increments) —
 * confirm against the full source.
 */
1729 app_init_threads(struct app_params *app)
/* Single TSC snapshot so all initial deadlines share one time base. */
1731 uint64_t time = rte_get_tsc_cycles();
1734 for (p_id = 0; p_id < app->n_pipelines; p_id++) {
1735 struct app_pipeline_params *params =
1736 &app->pipeline_params[p_id];
1737 struct app_pipeline_data *data = &app->pipeline_data[p_id];
1738 struct pipeline_type *ptype;
1739 struct app_thread_data *t;
1740 struct app_thread_pipeline_data *p;
/* Translate configured socket/core/hyperthread to a physical lcore id. */
1743 lcore_id = cpu_core_map_get_lcore_id(app->core_map,
1746 params->hyper_th_id);
1749 rte_panic("Invalid core s%" PRIu32 "c%" PRIu32 "%s\n",
1752 (params->hyper_th_id) ? "h" : "");
1754 t = &app->thread_data[lcore_id];
/* Per-thread timer period: milliseconds converted to TSC cycles. */
1756 t->timer_period = (rte_get_tsc_hz() * APP_THREAD_TIMER_PERIOD) / 1000;
1757 t->thread_req_deadline = time + t->timer_period;
/* Reset headroom (idle-cycle) accounting for this thread. */
1759 t->headroom_cycles = 0;
1760 t->headroom_time = rte_get_tsc_cycles();
1761 t->headroom_ratio = 0.0;
/* Attach the thread's request/response message queues; missing is fatal. */
1763 t->msgq_in = app_thread_msgq_in_get(app,
1766 params->hyper_th_id);
1767 if (t->msgq_in == NULL)
1768 rte_panic("Init error: Cannot find MSGQ_IN for thread %" PRId32,
1771 t->msgq_out = app_thread_msgq_out_get(app,
1774 params->hyper_th_id);
1775 if (t->msgq_out == NULL)
1776 rte_panic("Init error: Cannot find MSGQ_OUT for thread %" PRId32,
1779 ptype = app_pipeline_type_find(app, params->type);
1781 rte_panic("Init error: Unknown pipeline "
1782 "type \"%s\"\n", params->type);
/* Types without a custom run callback go on the "regular" list. */
1784 p = (ptype->be_ops->f_run == NULL) ?
1785 &t->regular[t->n_regular] :
1786 &t->custom[t->n_custom];
1788 p->pipeline_id = p_id;
1790 p->f_run = ptype->be_ops->f_run;
1791 p->f_timer = ptype->be_ops->f_timer;
1792 p->timer_period = data->timer_period;
1793 p->deadline = time + data->timer_period;
/* Presumably bumps n_regular here and n_custom in the (elided) else
 * branch — TODO confirm against the full source. */
1797 if (ptype->be_ops->f_run == NULL)
/*
 * app_init(): top-level initialization entry point.  Builds the CPU core
 * map and core mask, creates mempools, registers the common and
 * per-thread CLI command sets, registers the built-in pipeline types
 * (master, firewall), then instantiates all pipelines and assigns them
 * to lcore threads.
 *
 * NOTE(review): several init calls between these lines are elided in
 * this extract (line numbers 1808-1817 skip) — confirm the full call
 * sequence against the original source.
 */
1804 int app_init(struct app_params *app)
/* Build the physical CPU core map, then derive the core mask from it. */
1806 app_init_core_map(app);
1807 app_init_core_mask(app);
1810 app_init_mempool(app);
/* Make the generic and thread-management CLI commands available. */
1818 app_pipeline_common_cmd_push(app);
1819 app_pipeline_thread_cmd_push(app);
/* Register the pipeline types this build links in. */
1820 app_pipeline_type_register(app, &pipeline_master);
1821 app_pipeline_type_register(app, &pipeline_firewall);
1823 app_init_pipelines(app);
1824 app_init_threads(app);
/*
 * app_post_init(): second-stage initialization entry point; runs the
 * pipelines' front-end post-init callbacks.  (Return statement elided
 * in this extract.)
 */
1829 int app_post_init(struct app_params *app)
1831 app_post_init_pipelines(app);
/*
 * app_pipeline_type_cmd_push(): append a pipeline type's CLI command
 * table to the application-wide command array, set each command's
 * user-data pointer to the app context, and keep the array
 * NULL-terminated for the cmdline library.
 *
 * NOTE(review): the early-return lines (invalid args, zero commands,
 * insufficient space) are elided in this extract.
 */
1837 app_pipeline_type_cmd_push(struct app_params *app,
1838 struct pipeline_type *ptype)
1840 cmdline_parse_ctx_t *cmds;
1843 /* Check input arguments */
1844 if ((app == NULL) ||
1848 n_cmds = pipeline_type_cmds_count(ptype);
1852 cmds = ptype->fe_ops->cmds;
1854 /* Check for available slots in the application commands array */
1855 if (n_cmds > APP_MAX_CMDS - app->n_cmds)
1858 /* Push pipeline commands into the application */
1859 memcpy(&app->cmds[app->n_cmds],
1861 n_cmds * sizeof(cmdline_parse_ctx_t));
/* Point each new command's opaque data at the app context. */
1863 for (i = 0; i < n_cmds; i++)
1864 app->cmds[app->n_cmds + i]->data = app;
1866 app->n_cmds += n_cmds;
/* Keep the command array NULL-terminated for the cmdline parser. */
1867 app->cmds[app->n_cmds] = NULL;
/*
 * app_pipeline_type_register(): add a pipeline type to the application
 * registry.  Validates the descriptor (non-empty name, mandatory
 * back-end f_init/f_timer hooks), rejects duplicates and registry/CLI
 * overflow, copies the descriptor by value into the registry, and pushes
 * the type's CLI commands.
 *
 * NOTE(review): the return statements after each check are elided in
 * this extract.
 */
1873 app_pipeline_type_register(struct app_params *app, struct pipeline_type *ptype)
1877 /* Check input arguments */
1878 if ((app == NULL) ||
1880 (ptype->name == NULL) ||
1881 (strlen(ptype->name) == 0) ||
/* f_init and f_timer are mandatory back-end hooks for every type. */
1882 (ptype->be_ops->f_init == NULL) ||
1883 (ptype->be_ops->f_timer == NULL))
1886 /* Check for duplicate entry */
1887 for (i = 0; i < app->n_pipeline_types; i++)
1888 if (strcmp(app->pipeline_type[i].name, ptype->name) == 0)
1891 /* Check for resource availability */
1892 n_cmds = pipeline_type_cmds_count(ptype);
1893 if ((app->n_pipeline_types == APP_MAX_PIPELINE_TYPES) ||
1894 (n_cmds > APP_MAX_CMDS - app->n_cmds))
1897 /* Copy pipeline type (by value) into the registry */
1898 memcpy(&app->pipeline_type[app->n_pipeline_types++],
1900 sizeof(struct pipeline_type));
1902 /* Copy CLI commands */
1904 app_pipeline_type_cmd_push(app, ptype);
1910 pipeline_type *app_pipeline_type_find(struct app_params *app, char *name)
1914 for (i = 0; i < app->n_pipeline_types; i++)
1915 if (strcmp(app->pipeline_type[i].name, name) == 0)
1916 return &app->pipeline_type[i];