4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 #include <netinet/in.h>
38 #ifdef RTE_EXEC_ENV_LINUXAPP
40 #include <linux/if_tun.h>
43 #include <sys/ioctl.h>
46 #include <rte_cycles.h>
47 #include <rte_ethdev.h>
48 #include <rte_ether.h>
51 #include <rte_malloc.h>
55 #include "pipeline_common_fe.h"
56 #include "pipeline_master.h"
57 #include "pipeline_passthrough.h"
58 #include "pipeline_firewall.h"
59 #include "pipeline_flow_classification.h"
60 #include "pipeline_flow_actions.h"
61 #include "pipeline_routing.h"
62 #include "thread_fe.h"
64 #define APP_NAME_SIZE 32
66 #define APP_RETA_SIZE_MAX (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
69 app_init_core_map(struct app_params *app)
71 APP_LOG(app, HIGH, "Initializing CPU core map ...");
72 app->core_map = cpu_core_map_init(RTE_MAX_NUMA_NODES, RTE_MAX_LCORE,
75 if (app->core_map == NULL)
76 rte_panic("Cannot create CPU core map\n");
78 if (app->log_level >= APP_LOG_LEVEL_LOW)
79 cpu_core_map_print(app->core_map);
82 /* Core Mask String in Hex Representation */
83 #define APP_CORE_MASK_STRING_SIZE ((64 * APP_CORE_MASK_SIZE) / 8 * 2 + 1)
86 app_init_core_mask(struct app_params *app)
89 char core_mask_str[APP_CORE_MASK_STRING_SIZE];
91 for (i = 0; i < app->n_pipelines; i++) {
92 struct app_pipeline_params *p = &app->pipeline_params[i];
95 lcore_id = cpu_core_map_get_lcore_id(app->core_map,
101 rte_panic("Cannot create CPU core mask\n");
103 app_core_enable_in_core_mask(app, lcore_id);
106 app_core_build_core_mask_string(app, core_mask_str);
107 APP_LOG(app, HIGH, "CPU core mask = 0x%s", core_mask_str);
/*
 * Translate the parsed application EAL configuration (app->eal_params) into a
 * synthetic argv vector and invoke rte_eal_init() with it. Each present option
 * is formatted into a temporary buffer and strdup()'d into app->eal_argv.
 * Panics if EAL initialization fails.
 *
 * NOTE(review): this span is fragmented by extraction — declarations of
 * n_args/buffer/i/status, closing braces and several snprintf argument lines
 * are missing from view. Code left byte-identical; do not trust it to compile
 * as-is. strdup() results are never freed (process-lifetime args — acceptable
 * here, but worth noting).
 */
111 app_init_eal(struct app_params *app)
114 char core_mask_str[APP_CORE_MASK_STRING_SIZE];
115 struct app_eal_params *p = &app->eal_params;
/* argv[0] is conventionally the program name */
120 app->eal_argv[n_args++] = strdup(app->app_name);
/* -c: hex core mask derived from the pipeline placement */
122 app_core_build_core_mask_string(app, core_mask_str);
123 snprintf(buffer, sizeof(buffer), "-c%s", core_mask_str);
124 app->eal_argv[n_args++] = strdup(buffer);
127 snprintf(buffer, sizeof(buffer), "--lcores=%s", p->coremap);
128 app->eal_argv[n_args++] = strdup(buffer);
131 if (p->master_lcore_present) {
134 "--master-lcore=%" PRIu32,
136 app->eal_argv[n_args++] = strdup(buffer);
/* -n: number of memory channels */
139 snprintf(buffer, sizeof(buffer), "-n%" PRIu32, p->channels);
140 app->eal_argv[n_args++] = strdup(buffer);
142 if (p->memory_present) {
143 snprintf(buffer, sizeof(buffer), "-m%" PRIu32, p->memory);
144 app->eal_argv[n_args++] = strdup(buffer);
147 if (p->ranks_present) {
148 snprintf(buffer, sizeof(buffer), "-r%" PRIu32, p->ranks);
149 app->eal_argv[n_args++] = strdup(buffer);
/* One --pci-blacklist option per configured entry */
152 for (i = 0; i < APP_MAX_LINKS; i++) {
153 if (p->pci_blacklist[i] == NULL)
158 "--pci-blacklist=%s",
159 p->pci_blacklist[i]);
160 app->eal_argv[n_args++] = strdup(buffer);
/* Explicit whitelist entries apply only when a port mask is set */
163 if (app->port_mask != 0)
164 for (i = 0; i < APP_MAX_LINKS; i++) {
165 if (p->pci_whitelist[i] == NULL)
170 "--pci-whitelist=%s",
171 p->pci_whitelist[i]);
172 app->eal_argv[n_args++] = strdup(buffer);
/* Also whitelist the PCI BDF of every configured link */
175 for (i = 0; i < app->n_links; i++) {
176 char *pci_bdf = app->link_params[i].pci_bdf;
180 "--pci-whitelist=%s",
182 app->eal_argv[n_args++] = strdup(buffer);
/* Virtual devices (--vdev) */
185 for (i = 0; i < APP_MAX_LINKS; i++) {
186 if (p->vdev[i] == NULL)
193 app->eal_argv[n_args++] = strdup(buffer);
196 if ((p->vmware_tsc_map_present) && p->vmware_tsc_map) {
197 snprintf(buffer, sizeof(buffer), "--vmware-tsc-map");
198 app->eal_argv[n_args++] = strdup(buffer);
206 app->eal_argv[n_args++] = strdup(buffer);
210 snprintf(buffer, sizeof(buffer), "--syslog=%s", p->syslog);
211 app->eal_argv[n_args++] = strdup(buffer);
214 if (p->log_level_present) {
217 "--log-level=%" PRIu32,
219 app->eal_argv[n_args++] = strdup(buffer);
222 if ((p->version_present) && p->version) {
223 snprintf(buffer, sizeof(buffer), "-v");
224 app->eal_argv[n_args++] = strdup(buffer);
227 if ((p->help_present) && p->help) {
228 snprintf(buffer, sizeof(buffer), "--help");
229 app->eal_argv[n_args++] = strdup(buffer);
232 if ((p->no_huge_present) && p->no_huge) {
233 snprintf(buffer, sizeof(buffer), "--no-huge");
234 app->eal_argv[n_args++] = strdup(buffer);
237 if ((p->no_pci_present) && p->no_pci) {
238 snprintf(buffer, sizeof(buffer), "--no-pci");
239 app->eal_argv[n_args++] = strdup(buffer);
242 if ((p->no_hpet_present) && p->no_hpet) {
243 snprintf(buffer, sizeof(buffer), "--no-hpet");
244 app->eal_argv[n_args++] = strdup(buffer);
247 if ((p->no_shconf_present) && p->no_shconf) {
248 snprintf(buffer, sizeof(buffer), "--no-shconf");
249 app->eal_argv[n_args++] = strdup(buffer);
253 snprintf(buffer, sizeof(buffer), "-d%s", p->add_driver);
254 app->eal_argv[n_args++] = strdup(buffer);
262 app->eal_argv[n_args++] = strdup(buffer);
266 snprintf(buffer, sizeof(buffer), "--huge-dir=%s", p->huge_dir);
267 app->eal_argv[n_args++] = strdup(buffer);
270 if (p->file_prefix) {
275 app->eal_argv[n_args++] = strdup(buffer);
278 if (p->base_virtaddr) {
281 "--base-virtaddr=%s",
283 app->eal_argv[n_args++] = strdup(buffer);
286 if ((p->create_uio_dev_present) && p->create_uio_dev) {
287 snprintf(buffer, sizeof(buffer), "--create-uio-dev");
288 app->eal_argv[n_args++] = strdup(buffer);
296 app->eal_argv[n_args++] = strdup(buffer);
299 if ((p->xen_dom0_present) && (p->xen_dom0)) {
300 snprintf(buffer, sizeof(buffer), "--xen-dom0");
301 app->eal_argv[n_args++] = strdup(buffer);
/* "--" terminator: everything after it would be app (non-EAL) args */
304 snprintf(buffer, sizeof(buffer), "--");
305 app->eal_argv[n_args++] = strdup(buffer);
307 app->eal_argc = n_args;
309 APP_LOG(app, HIGH, "Initializing EAL ...");
/* Echo the synthesized argv (skipping argv[0]) at verbose log levels */
310 if (app->log_level >= APP_LOG_LEVEL_LOW) {
313 fprintf(stdout, "[APP] EAL arguments: \"");
314 for (i = 1; i < app->eal_argc; i++)
315 fprintf(stdout, "%s ", app->eal_argv[i]);
316 fprintf(stdout, "\"\n");
319 status = rte_eal_init(app->eal_argc, app->eal_argv);
321 rte_panic("EAL init error\n");
325 app_init_mempool(struct app_params *app)
329 for (i = 0; i < app->n_mempools; i++) {
330 struct app_mempool_params *p = &app->mempool_params[i];
332 APP_LOG(app, HIGH, "Initializing %s ...", p->name);
333 app->mempool[i] = rte_pktmbuf_pool_create(
339 sizeof(struct rte_mbuf), /* mbuf data size */
342 if (app->mempool[i] == NULL)
343 rte_panic("%s init error\n", p->name);
348 app_link_filter_arp_add(struct app_link_params *link)
350 struct rte_eth_ethertype_filter filter = {
351 .ether_type = ETHER_TYPE_ARP,
353 .queue = link->arp_q,
356 return rte_eth_dev_filter_ctrl(link->pmd_id,
357 RTE_ETH_FILTER_ETHERTYPE,
363 app_link_filter_tcp_syn_add(struct app_link_params *link)
365 struct rte_eth_syn_filter filter = {
367 .queue = link->tcp_syn_q,
370 return rte_eth_dev_filter_ctrl(link->pmd_id,
377 app_link_filter_ip_add(struct app_link_params *l1, struct app_link_params *l2)
379 struct rte_eth_ntuple_filter filter = {
380 .flags = RTE_5TUPLE_FLAGS,
381 .dst_ip = rte_bswap32(l2->ip),
382 .dst_ip_mask = UINT32_MAX, /* Enable */
384 .src_ip_mask = 0, /* Disable */
386 .dst_port_mask = 0, /* Disable */
388 .src_port_mask = 0, /* Disable */
390 .proto_mask = 0, /* Disable */
392 .priority = 1, /* Lowest */
393 .queue = l1->ip_local_q,
396 return rte_eth_dev_filter_ctrl(l1->pmd_id,
397 RTE_ETH_FILTER_NTUPLE,
403 app_link_filter_ip_del(struct app_link_params *l1, struct app_link_params *l2)
405 struct rte_eth_ntuple_filter filter = {
406 .flags = RTE_5TUPLE_FLAGS,
407 .dst_ip = rte_bswap32(l2->ip),
408 .dst_ip_mask = UINT32_MAX, /* Enable */
410 .src_ip_mask = 0, /* Disable */
412 .dst_port_mask = 0, /* Disable */
414 .src_port_mask = 0, /* Disable */
416 .proto_mask = 0, /* Disable */
418 .priority = 1, /* Lowest */
419 .queue = l1->ip_local_q,
422 return rte_eth_dev_filter_ctrl(l1->pmd_id,
423 RTE_ETH_FILTER_NTUPLE,
424 RTE_ETH_FILTER_DELETE,
429 app_link_filter_tcp_add(struct app_link_params *l1, struct app_link_params *l2)
431 struct rte_eth_ntuple_filter filter = {
432 .flags = RTE_5TUPLE_FLAGS,
433 .dst_ip = rte_bswap32(l2->ip),
434 .dst_ip_mask = UINT32_MAX, /* Enable */
436 .src_ip_mask = 0, /* Disable */
438 .dst_port_mask = 0, /* Disable */
440 .src_port_mask = 0, /* Disable */
441 .proto = IPPROTO_TCP,
442 .proto_mask = UINT8_MAX, /* Enable */
444 .priority = 2, /* Higher priority than IP */
445 .queue = l1->tcp_local_q,
448 return rte_eth_dev_filter_ctrl(l1->pmd_id,
449 RTE_ETH_FILTER_NTUPLE,
455 app_link_filter_tcp_del(struct app_link_params *l1, struct app_link_params *l2)
457 struct rte_eth_ntuple_filter filter = {
458 .flags = RTE_5TUPLE_FLAGS,
459 .dst_ip = rte_bswap32(l2->ip),
460 .dst_ip_mask = UINT32_MAX, /* Enable */
462 .src_ip_mask = 0, /* Disable */
464 .dst_port_mask = 0, /* Disable */
466 .src_port_mask = 0, /* Disable */
467 .proto = IPPROTO_TCP,
468 .proto_mask = UINT8_MAX, /* Enable */
470 .priority = 2, /* Higher priority than IP */
471 .queue = l1->tcp_local_q,
474 return rte_eth_dev_filter_ctrl(l1->pmd_id,
475 RTE_ETH_FILTER_NTUPLE,
476 RTE_ETH_FILTER_DELETE,
481 app_link_filter_udp_add(struct app_link_params *l1, struct app_link_params *l2)
483 struct rte_eth_ntuple_filter filter = {
484 .flags = RTE_5TUPLE_FLAGS,
485 .dst_ip = rte_bswap32(l2->ip),
486 .dst_ip_mask = UINT32_MAX, /* Enable */
488 .src_ip_mask = 0, /* Disable */
490 .dst_port_mask = 0, /* Disable */
492 .src_port_mask = 0, /* Disable */
493 .proto = IPPROTO_UDP,
494 .proto_mask = UINT8_MAX, /* Enable */
496 .priority = 2, /* Higher priority than IP */
497 .queue = l1->udp_local_q,
500 return rte_eth_dev_filter_ctrl(l1->pmd_id,
501 RTE_ETH_FILTER_NTUPLE,
507 app_link_filter_udp_del(struct app_link_params *l1, struct app_link_params *l2)
509 struct rte_eth_ntuple_filter filter = {
510 .flags = RTE_5TUPLE_FLAGS,
511 .dst_ip = rte_bswap32(l2->ip),
512 .dst_ip_mask = UINT32_MAX, /* Enable */
514 .src_ip_mask = 0, /* Disable */
516 .dst_port_mask = 0, /* Disable */
518 .src_port_mask = 0, /* Disable */
519 .proto = IPPROTO_UDP,
520 .proto_mask = UINT8_MAX, /* Enable */
522 .priority = 2, /* Higher priority than IP */
523 .queue = l1->udp_local_q,
526 return rte_eth_dev_filter_ctrl(l1->pmd_id,
527 RTE_ETH_FILTER_NTUPLE,
528 RTE_ETH_FILTER_DELETE,
533 app_link_filter_sctp_add(struct app_link_params *l1, struct app_link_params *l2)
535 struct rte_eth_ntuple_filter filter = {
536 .flags = RTE_5TUPLE_FLAGS,
537 .dst_ip = rte_bswap32(l2->ip),
538 .dst_ip_mask = UINT32_MAX, /* Enable */
540 .src_ip_mask = 0, /* Disable */
542 .dst_port_mask = 0, /* Disable */
544 .src_port_mask = 0, /* Disable */
545 .proto = IPPROTO_SCTP,
546 .proto_mask = UINT8_MAX, /* Enable */
548 .priority = 2, /* Higher priority than IP */
549 .queue = l1->sctp_local_q,
552 return rte_eth_dev_filter_ctrl(l1->pmd_id,
553 RTE_ETH_FILTER_NTUPLE,
559 app_link_filter_sctp_del(struct app_link_params *l1, struct app_link_params *l2)
561 struct rte_eth_ntuple_filter filter = {
562 .flags = RTE_5TUPLE_FLAGS,
563 .dst_ip = rte_bswap32(l2->ip),
564 .dst_ip_mask = UINT32_MAX, /* Enable */
566 .src_ip_mask = 0, /* Disable */
568 .dst_port_mask = 0, /* Disable */
570 .src_port_mask = 0, /* Disable */
571 .proto = IPPROTO_SCTP,
572 .proto_mask = UINT8_MAX, /* Enable */
574 .priority = 2, /* Higher priority than IP */
575 .queue = l1->sctp_local_q,
578 return rte_eth_dev_filter_ctrl(l1->pmd_id,
579 RTE_ETH_FILTER_NTUPLE,
580 RTE_ETH_FILTER_DELETE,
585 app_link_set_arp_filter(struct app_params *app, struct app_link_params *cp)
587 if (cp->arp_q != 0) {
588 int status = app_link_filter_arp_add(cp);
590 APP_LOG(app, LOW, "%s (%" PRIu32 "): "
591 "Adding ARP filter (queue = %" PRIu32 ")",
592 cp->name, cp->pmd_id, cp->arp_q);
595 rte_panic("%s (%" PRIu32 "): "
596 "Error adding ARP filter "
597 "(queue = %" PRIu32 ") (%" PRId32 ")\n",
598 cp->name, cp->pmd_id, cp->arp_q, status);
603 app_link_set_tcp_syn_filter(struct app_params *app, struct app_link_params *cp)
605 if (cp->tcp_syn_q != 0) {
606 int status = app_link_filter_tcp_syn_add(cp);
608 APP_LOG(app, LOW, "%s (%" PRIu32 "): "
609 "Adding TCP SYN filter (queue = %" PRIu32 ")",
610 cp->name, cp->pmd_id, cp->tcp_syn_q);
613 rte_panic("%s (%" PRIu32 "): "
614 "Error adding TCP SYN filter "
615 "(queue = %" PRIu32 ") (%" PRId32 ")\n",
616 cp->name, cp->pmd_id, cp->tcp_syn_q,
/*
 * Bring link cp UP: on every configured link, install IP/TCP/UDP/SCTP
 * 5-tuple filters matching cp's IP address (skipping queues configured as 0),
 * then ask the PMD to raise the link. Panics on any filter error; tolerates
 * -ENOTSUP from rte_eth_dev_set_link_up(). Finally marks the link UP.
 *
 * NOTE(review): span is fragmented — local declarations, several format
 * string fragments, closing braces and the final cp->state assignment are
 * missing from view. Code left byte-identical.
 */
622 app_link_up_internal(struct app_params *app, struct app_link_params *cp)
627 /* For each link, add filters for IP of current link */
629 for (i = 0; i < app->n_links; i++) {
630 struct app_link_params *p = &app->link_params[i];
/* IP (any-protocol) filter to the local-IP queue */
633 if (p->ip_local_q != 0) {
634 int status = app_link_filter_ip_add(p, cp);
636 APP_LOG(app, LOW, "%s (%" PRIu32 "): "
637 "Adding IP filter (queue= %" PRIu32
638 ", IP = 0x%08" PRIx32 ")",
639 p->name, p->pmd_id, p->ip_local_q,
643 rte_panic("%s (%" PRIu32 "): "
645 "filter (queue= %" PRIu32 ", "
649 p->ip_local_q, cp->ip, status);
/* TCP filter to the local-TCP queue */
653 if (p->tcp_local_q != 0) {
654 int status = app_link_filter_tcp_add(p, cp);
656 APP_LOG(app, LOW, "%s (%" PRIu32 "): "
659 ", IP = 0x%08" PRIx32 ")",
660 p->name, p->pmd_id, p->tcp_local_q,
664 rte_panic("%s (%" PRIu32 "): "
666 "filter (queue = %" PRIu32 ", "
670 p->tcp_local_q, cp->ip, status);
/* UDP filter to the local-UDP queue */
674 if (p->udp_local_q != 0) {
675 int status = app_link_filter_udp_add(p, cp);
677 APP_LOG(app, LOW, "%s (%" PRIu32 "): "
680 ", IP = 0x%08" PRIx32 ")",
681 p->name, p->pmd_id, p->udp_local_q,
685 rte_panic("%s (%" PRIu32 "): "
687 "filter (queue = %" PRIu32 ", "
691 p->udp_local_q, cp->ip, status);
/* SCTP filter to the local-SCTP queue */
695 if (p->sctp_local_q != 0) {
696 int status = app_link_filter_sctp_add(p, cp);
698 APP_LOG(app, LOW, "%s (%" PRIu32
699 "): Adding SCTP filter "
701 ", IP = 0x%08" PRIx32 ")",
702 p->name, p->pmd_id, p->sctp_local_q,
706 rte_panic("%s (%" PRIu32 "): "
708 "filter (queue = %" PRIu32 ", "
712 p->sctp_local_q, cp->ip,
/* PMD link up */
719 status = rte_eth_dev_set_link_up(cp->pmd_id);
720 /* Do not panic if PMD does not provide link up functionality */
721 if (status < 0 && status != -ENOTSUP)
722 rte_panic("%s (%" PRIu32 "): PMD set link up error %"
723 PRId32 "\n", cp->name, cp->pmd_id, status);
725 /* Mark link as UP */
/*
 * Bring link cp DOWN: ask the PMD to lower the link (tolerating -ENOTSUP),
 * mark the link DOWN, and — if cp's IP is valid — remove the IP/TCP/UDP/SCTP
 * 5-tuple filters that app_link_up_internal() installed on every link.
 * Panics on any filter-delete error.
 *
 * NOTE(review): span is fragmented — local declarations, the cp->state
 * assignment, the IP-validity check and several format fragments are missing
 * from view. Code left byte-identical.
 */
730 app_link_down_internal(struct app_params *app, struct app_link_params *cp)
/* PMD link down */
736 status = rte_eth_dev_set_link_down(cp->pmd_id);
737 /* Do not panic if PMD does not provide link down functionality */
738 if (status < 0 && status != -ENOTSUP)
739 rte_panic("%s (%" PRIu32 "): PMD set link down error %"
740 PRId32 "\n", cp->name, cp->pmd_id, status);
742 /* Mark link as DOWN */
745 /* Return if current link IP is not valid */
749 /* For each link, remove filters for IP of current link */
750 for (i = 0; i < app->n_links; i++) {
751 struct app_link_params *p = &app->link_params[i];
/* Remove the IP (any-protocol) filter */
754 if (p->ip_local_q != 0) {
755 int status = app_link_filter_ip_del(p, cp);
757 APP_LOG(app, LOW, "%s (%" PRIu32
758 "): Deleting IP filter "
759 "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")",
760 p->name, p->pmd_id, p->ip_local_q, cp->ip);
763 rte_panic("%s (%" PRIu32
764 "): Error deleting IP filter "
768 p->name, p->pmd_id, p->ip_local_q,
/* Remove the TCP filter */
773 if (p->tcp_local_q != 0) {
774 int status = app_link_filter_tcp_del(p, cp);
776 APP_LOG(app, LOW, "%s (%" PRIu32
777 "): Deleting TCP filter "
779 ", IP = 0x%" PRIx32 ")",
780 p->name, p->pmd_id, p->tcp_local_q, cp->ip);
783 rte_panic("%s (%" PRIu32
784 "): Error deleting TCP filter "
788 p->name, p->pmd_id, p->tcp_local_q,
/* Remove the UDP filter */
793 if (p->udp_local_q != 0) {
794 int status = app_link_filter_udp_del(p, cp);
796 APP_LOG(app, LOW, "%s (%" PRIu32
797 "): Deleting UDP filter "
798 "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")",
799 p->name, p->pmd_id, p->udp_local_q, cp->ip);
802 rte_panic("%s (%" PRIu32
803 "): Error deleting UDP filter "
807 p->name, p->pmd_id, p->udp_local_q,
/* Remove the SCTP filter */
812 if (p->sctp_local_q != 0) {
813 int status = app_link_filter_sctp_del(p, cp);
815 APP_LOG(app, LOW, "%s (%" PRIu32
816 "): Deleting SCTP filter "
818 ", IP = 0x%" PRIx32 ")",
819 p->name, p->pmd_id, p->sctp_local_q, cp->ip);
822 rte_panic("%s (%" PRIu32
823 "): Error deleting SCTP filter "
827 p->name, p->pmd_id, p->sctp_local_q,
834 app_check_link(struct app_params *app)
836 uint32_t all_links_up, i;
840 for (i = 0; i < app->n_links; i++) {
841 struct app_link_params *p = &app->link_params[i];
842 struct rte_eth_link link_params;
844 memset(&link_params, 0, sizeof(link_params));
845 rte_eth_link_get(p->pmd_id, &link_params);
847 APP_LOG(app, HIGH, "%s (%" PRIu32 ") (%" PRIu32 " Gbps) %s",
850 link_params.link_speed / 1000,
851 link_params.link_status ? "UP" : "DOWN");
853 if (link_params.link_status == ETH_LINK_DOWN)
857 if (all_links_up == 0)
858 rte_panic("Some links are DOWN\n");
862 is_any_swq_frag_or_ras(struct app_params *app)
866 for (i = 0; i < app->n_pktq_swq; i++) {
867 struct app_pktq_swq_params *p = &app->swq_params[i];
869 if ((p->ipv4_frag == 1) || (p->ipv6_frag == 1) ||
870 (p->ipv4_ras == 1) || (p->ipv6_ras == 1))
878 app_init_link_frag_ras(struct app_params *app)
882 if (is_any_swq_frag_or_ras(app)) {
883 for (i = 0; i < app->n_pktq_hwq_out; i++) {
884 struct app_pktq_hwq_out_params *p_txq = &app->hwq_out_params[i];
886 p_txq->conf.txq_flags &= ~ETH_TXQ_FLAGS_NOMULTSEGS;
892 app_get_cpu_socket_id(uint32_t pmd_id)
894 int status = rte_eth_dev_socket_id(pmd_id);
896 return (status != SOCKET_ID_ANY) ? status : 0;
900 app_link_rss_enabled(struct app_link_params *cp)
902 return (cp->n_rss_qs) ? 1 : 0;
906 app_link_rss_setup(struct app_link_params *cp)
908 struct rte_eth_dev_info dev_info;
909 struct rte_eth_rss_reta_entry64 reta_conf[APP_RETA_SIZE_MAX];
914 memset(&dev_info, 0, sizeof(dev_info));
915 rte_eth_dev_info_get(cp->pmd_id, &dev_info);
917 if (dev_info.reta_size == 0)
918 rte_panic("%s (%u): RSS setup error (null RETA size)\n",
919 cp->name, cp->pmd_id);
921 if (dev_info.reta_size > ETH_RSS_RETA_SIZE_512)
922 rte_panic("%s (%u): RSS setup error (RETA size too big)\n",
923 cp->name, cp->pmd_id);
925 /* Setup RETA contents */
926 memset(reta_conf, 0, sizeof(reta_conf));
928 for (i = 0; i < dev_info.reta_size; i++)
929 reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
931 for (i = 0; i < dev_info.reta_size; i++) {
932 uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
933 uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
934 uint32_t rss_qs_pos = i % cp->n_rss_qs;
936 reta_conf[reta_id].reta[reta_pos] =
937 (uint16_t) cp->rss_qs[rss_qs_pos];
941 status = rte_eth_dev_rss_reta_update(cp->pmd_id,
945 rte_panic("%s (%u): RSS setup error (RETA update failed)\n",
946 cp->name, cp->pmd_id);
950 app_init_link_set_config(struct app_link_params *p)
953 p->conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
954 p->conf.rx_adv_conf.rss_conf.rss_hf = p->rss_proto_ipv4 |
/*
 * Initialize every configured link (ethdev port): adjust TX flags for
 * frag/RAS, configure the device, read its MAC address, set up each RX and
 * TX queue belonging to the link (adjusting descriptor counts to device
 * limits), start the port, install ARP/TCP-SYN filters and RSS, and bring
 * the link up. Finally verifies all links are UP.
 *
 * NOTE(review): span is fragmented — local declarations, several call
 * argument lines, promiscuous-mode guard, and closing braces are missing
 * from view. Code left byte-identical.
 */
961 app_init_link(struct app_params *app)
965 app_init_link_frag_ras(app);
967 for (i = 0; i < app->n_links; i++) {
968 struct app_link_params *p_link = &app->link_params[i];
969 uint32_t link_id, n_hwq_in, n_hwq_out, j;
/* Link id parsed from the section name ("LINKn") */
972 sscanf(p_link->name, "LINK%" PRIu32, &link_id);
973 n_hwq_in = app_link_get_n_rxq(app, p_link);
974 n_hwq_out = app_link_get_n_txq(app, p_link);
975 app_init_link_set_config(p_link);
977 APP_LOG(app, HIGH, "Initializing %s (%" PRIu32") "
978 "(%" PRIu32 " RXQ, %" PRIu32 " TXQ) ...",
/* Device configuration with the RX/TX queue counts computed above */
985 status = rte_eth_dev_configure(
991 rte_panic("%s (%" PRId32 "): "
992 "init error (%" PRId32 ")\n",
993 p_link->name, p_link->pmd_id, status);
995 rte_eth_macaddr_get(p_link->pmd_id,
996 (struct ether_addr *) &p_link->mac_addr);
999 rte_eth_promiscuous_enable(p_link->pmd_id);
/* RX queue setup: match "RXQn.m" entries belonging to this link */
1002 for (j = 0; j < app->n_pktq_hwq_in; j++) {
1003 struct app_pktq_hwq_in_params *p_rxq =
1004 &app->hwq_in_params[j];
1005 uint32_t rxq_link_id, rxq_queue_id;
1006 uint16_t nb_rxd = p_rxq->size;
1008 sscanf(p_rxq->name, "RXQ%" PRIu32 ".%" PRIu32,
1009 &rxq_link_id, &rxq_queue_id);
1010 if (rxq_link_id != link_id)
/* Clamp descriptor count to device limits */
1013 status = rte_eth_dev_adjust_nb_rx_tx_desc(
1018 rte_panic("%s (%" PRIu32 "): "
1019 "%s adjust number of Rx descriptors "
1020 "error (%" PRId32 ")\n",
1026 status = rte_eth_rx_queue_setup(
1030 app_get_cpu_socket_id(p_link->pmd_id),
1032 app->mempool[p_rxq->mempool_id]);
1034 rte_panic("%s (%" PRIu32 "): "
1035 "%s init error (%" PRId32 ")\n",
/* TX queue setup: match "TXQn.m" entries belonging to this link */
1043 for (j = 0; j < app->n_pktq_hwq_out; j++) {
1044 struct app_pktq_hwq_out_params *p_txq =
1045 &app->hwq_out_params[j];
1046 uint32_t txq_link_id, txq_queue_id;
1047 uint16_t nb_txd = p_txq->size;
1049 sscanf(p_txq->name, "TXQ%" PRIu32 ".%" PRIu32,
1050 &txq_link_id, &txq_queue_id);
1051 if (txq_link_id != link_id)
1054 status = rte_eth_dev_adjust_nb_rx_tx_desc(
1059 rte_panic("%s (%" PRIu32 "): "
1060 "%s adjust number of Tx descriptors "
1061 "error (%" PRId32 ")\n",
1067 status = rte_eth_tx_queue_setup(
1071 app_get_cpu_socket_id(p_link->pmd_id),
1074 rte_panic("%s (%" PRIu32 "): "
1075 "%s init error (%" PRId32 ")\n",
/* Start the port, then filters/RSS, then bring the link up */
1083 status = rte_eth_dev_start(p_link->pmd_id);
1085 rte_panic("Cannot start %s (error %" PRId32 ")\n",
1086 p_link->name, status);
1089 app_link_set_arp_filter(app, p_link);
1090 app_link_set_tcp_syn_filter(app, p_link);
1091 if (app_link_rss_enabled(p_link))
1092 app_link_rss_setup(p_link);
1095 app_link_up_internal(app, p_link);
1098 app_check_link(app);
1102 app_init_swq(struct app_params *app)
1106 for (i = 0; i < app->n_pktq_swq; i++) {
1107 struct app_pktq_swq_params *p = &app->swq_params[i];
1110 if (app_swq_get_readers(app, p) == 1)
1111 flags |= RING_F_SC_DEQ;
1112 if (app_swq_get_writers(app, p) == 1)
1113 flags |= RING_F_SP_ENQ;
1115 APP_LOG(app, HIGH, "Initializing %s...", p->name);
1116 app->swq[i] = rte_ring_create(
1122 if (app->swq[i] == NULL)
1123 rte_panic("%s init error\n", p->name);
/*
 * Initialize every traffic manager (hierarchical scheduler) instance:
 * configure the rte_sched port with the line rate derived from the
 * underlying link speed, then configure each subport and each pipe that has
 * a valid profile assigned. Panics on any configuration failure.
 *
 * NOTE(review): span is fragmented — local declarations, the pipe for-loop
 * header, rte_sched_pipe_config() arguments and closing braces are missing
 * from view. Code left byte-identical.
 */
1128 app_init_tm(struct app_params *app)
1132 for (i = 0; i < app->n_pktq_tm; i++) {
1133 struct app_pktq_tm_params *p_tm = &app->tm_params[i];
1134 struct app_link_params *p_link;
1135 struct rte_eth_link link_eth_params;
1136 struct rte_sched_port *sched;
1137 uint32_t n_subports, subport_id;
/* TM port params derive from the link it is attached to */
1140 p_link = app_get_link_for_tm(app, p_tm);
1142 rte_eth_link_get(p_link->pmd_id, &link_eth_params);
1145 p_tm->sched_port_params.name = p_tm->name;
1146 p_tm->sched_port_params.socket =
1147 app_get_cpu_socket_id(p_link->pmd_id);
/* Rate: link speed (Mbps) converted to bytes per second */
1148 p_tm->sched_port_params.rate =
1149 (uint64_t) link_eth_params.link_speed * 1000 * 1000 / 8;
1151 APP_LOG(app, HIGH, "Initializing %s ...", p_tm->name);
1152 sched = rte_sched_port_config(&p_tm->sched_port_params);
1154 rte_panic("%s init error\n", p_tm->name);
/* Subport configuration */
1158 n_subports = p_tm->sched_port_params.n_subports_per_port;
1159 for (subport_id = 0; subport_id < n_subports; subport_id++) {
1160 uint32_t n_pipes_per_subport, pipe_id;
1162 status = rte_sched_subport_config(sched,
1164 &p_tm->sched_subport_params[subport_id]);
1166 rte_panic("%s subport %" PRIu32
1167 " init error (%" PRId32 ")\n",
1168 p_tm->name, subport_id, status);
/* Pipe configuration — pipes with profile -1 are skipped */
1171 n_pipes_per_subport =
1172 p_tm->sched_port_params.n_pipes_per_subport;
1174 pipe_id < n_pipes_per_subport;
1176 int profile_id = p_tm->sched_pipe_to_profile[
1177 subport_id * APP_MAX_SCHED_PIPES +
1180 if (profile_id == -1)
1183 status = rte_sched_pipe_config(sched,
1188 rte_panic("%s subport %" PRIu32
1190 " (profile %" PRId32 ") "
1191 "init error (% " PRId32 ")\n",
1192 p_tm->name, subport_id, pipe_id,
1193 profile_id, status);
1199 #ifndef RTE_EXEC_ENV_LINUXAPP
1201 app_init_tap(struct app_params *app) {
1202 if (app->n_pktq_tap == 0)
1205 rte_panic("TAP device not supported.\n");
1209 app_init_tap(struct app_params *app)
1213 for (i = 0; i < app->n_pktq_tap; i++) {
1214 struct app_pktq_tap_params *p_tap = &app->tap_params[i];
1218 APP_LOG(app, HIGH, "Initializing %s ...", p_tap->name);
1220 fd = open("/dev/net/tun", O_RDWR | O_NONBLOCK);
1222 rte_panic("Cannot open file /dev/net/tun\n");
1224 memset(&ifr, 0, sizeof(ifr));
1225 ifr.ifr_flags = IFF_TAP | IFF_NO_PI; /* No packet information */
1226 snprintf(ifr.ifr_name, IFNAMSIZ, "%s", p_tap->name);
1228 status = ioctl(fd, TUNSETIFF, (void *) &ifr);
1230 rte_panic("TAP setup error\n");
#ifdef RTE_LIBRTE_KNI
/*
 * KNI kernel request callback: bring the underlying ethdev port up or down
 * when the kernel toggles the KNI interface. Rejects invalid port ids.
 */
static int
kni_config_network_interface(uint8_t port_id, uint8_t if_up) {
	int ret = 0;

	if (port_id >= rte_eth_dev_count())
		return -EINVAL;

	ret = (if_up) ?
		rte_eth_dev_set_link_up(port_id) :
		rte_eth_dev_set_link_down(port_id);

	return ret;
}

/*
 * KNI kernel request callback: apply an MTU change from the kernel to the
 * underlying ethdev port. Rejects invalid port ids and MTUs above
 * ETHER_MAX_LEN.
 *
 * NOTE(review): span was fragmented; bodies reconstructed — confirm against
 * upstream.
 */
static int
kni_change_mtu(uint8_t port_id, unsigned new_mtu) {
	int ret;

	if (port_id >= rte_eth_dev_count())
		return -EINVAL;

	if (new_mtu > ETHER_MAX_LEN)
		return -EINVAL;

	/* Set new MTU */
	ret = rte_eth_dev_set_mtu(port_id, new_mtu);
	if (ret < 0)
		return ret;

	return 0;
}
#endif /* RTE_LIBRTE_KNI */
/*
 * Without librte_kni: any configured KNI queue is a fatal error. With
 * librte_kni: allocate one KNI context per configured queue, optionally
 * force-binding its kernel thread to the lcore resolved from the queue's
 * (socket, core, hyper-thread) placement, wiring the MTU/link callbacks to
 * the functions above. Panics on allocation failure.
 *
 * NOTE(review): span is fragmented — the stub's rte_panic guard body, the
 * #else line, the force-bind lcore check and the final NULL check on
 * app->kni[i] are partially missing from view. Code left byte-identical.
 */
1271 #ifndef RTE_LIBRTE_KNI
1273 app_init_kni(struct app_params *app) {
1274 if (app->n_pktq_kni == 0)
1277 rte_panic("Can not init KNI without librte_kni support.\n");
1281 app_init_kni(struct app_params *app) {
1284 if (app->n_pktq_kni == 0)
/* Global KNI subsystem init, sized to the number of KNI queues */
1287 rte_kni_init(app->n_pktq_kni);
1289 for (i = 0; i < app->n_pktq_kni; i++) {
1290 struct app_pktq_kni_params *p_kni = &app->kni_params[i];
1291 struct app_link_params *p_link;
1292 struct rte_eth_dev_info dev_info;
1293 struct app_mempool_params *mempool_params;
1294 struct rte_mempool *mempool;
1295 struct rte_kni_conf conf;
1296 struct rte_kni_ops ops;
/* Device info of the link this KNI is attached to */
1299 p_link = app_get_link_for_kni(app, p_kni);
1300 memset(&dev_info, 0, sizeof(dev_info));
1301 rte_eth_dev_info_get(p_link->pmd_id, &dev_info);
1304 mempool_params = &app->mempool_params[p_kni->mempool_id];
1305 mempool = app->mempool[p_kni->mempool_id];
1308 memset(&conf, 0, sizeof(conf));
1309 snprintf(conf.name, RTE_KNI_NAMESIZE, "%s", p_kni->name);
1310 conf.force_bind = p_kni->force_bind;
/* Optional pinning of the KNI kernel thread to a specific lcore */
1311 if (conf.force_bind) {
1314 lcore_id = cpu_core_map_get_lcore_id(app->core_map,
1317 p_kni->hyper_th_id);
1320 rte_panic("%s invalid CPU core\n", p_kni->name);
1322 conf.core_id = (uint32_t) lcore_id;
1324 conf.group_id = p_link->pmd_id;
1325 conf.mbuf_size = mempool_params->buffer_size;
1326 conf.addr = dev_info.pci_dev->addr;
1327 conf.id = dev_info.pci_dev->id;
/* Kernel request callbacks (MTU change, link up/down) */
1329 memset(&ops, 0, sizeof(ops));
1330 ops.port_id = (uint8_t) p_link->pmd_id;
1331 ops.change_mtu = kni_change_mtu;
1332 ops.config_network_if = kni_config_network_interface;
1334 APP_LOG(app, HIGH, "Initializing %s ...", p_kni->name);
1335 app->kni[i] = rte_kni_alloc(mempool, &conf, &ops);
1337 rte_panic("%s init error\n", p_kni->name);
1340 #endif /* RTE_LIBRTE_KNI */
1343 app_init_msgq(struct app_params *app)
1347 for (i = 0; i < app->n_msgq; i++) {
1348 struct app_msgq_params *p = &app->msgq_params[i];
1350 APP_LOG(app, HIGH, "Initializing %s ...", p->name);
1351 app->msgq[i] = rte_ring_create(
1355 RING_F_SP_ENQ | RING_F_SC_DEQ);
1357 if (app->msgq[i] == NULL)
1358 rte_panic("%s init error\n", p->name);
1362 void app_pipeline_params_get(struct app_params *app,
1363 struct app_pipeline_params *p_in,
1364 struct pipeline_params *p_out)
1368 snprintf(p_out->name, PIPELINE_NAME_SIZE, "%s", p_in->name);
1370 snprintf(p_out->type, PIPELINE_TYPE_SIZE, "%s", p_in->type);
1372 p_out->socket_id = (int) p_in->socket_id;
1374 p_out->log_level = app->log_level;
1377 p_out->n_ports_in = p_in->n_pktq_in;
1378 for (i = 0; i < p_in->n_pktq_in; i++) {
1379 struct app_pktq_in_params *in = &p_in->pktq_in[i];
1380 struct pipeline_port_in_params *out = &p_out->port_in[i];
1383 case APP_PKTQ_IN_HWQ:
1385 struct app_pktq_hwq_in_params *p_hwq_in =
1386 &app->hwq_in_params[in->id];
1387 struct app_link_params *p_link =
1388 app_get_link_for_rxq(app, p_hwq_in);
1389 uint32_t rxq_link_id, rxq_queue_id;
1391 sscanf(p_hwq_in->name, "RXQ%" SCNu32 ".%" SCNu32,
1395 out->type = PIPELINE_PORT_IN_ETHDEV_READER;
1396 out->params.ethdev.port_id = p_link->pmd_id;
1397 out->params.ethdev.queue_id = rxq_queue_id;
1398 out->burst_size = p_hwq_in->burst;
1401 case APP_PKTQ_IN_SWQ:
1403 struct app_pktq_swq_params *swq_params = &app->swq_params[in->id];
1405 if ((swq_params->ipv4_frag == 0) && (swq_params->ipv6_frag == 0)) {
1406 if (app_swq_get_readers(app, swq_params) == 1) {
1407 out->type = PIPELINE_PORT_IN_RING_READER;
1408 out->params.ring.ring = app->swq[in->id];
1409 out->burst_size = app->swq_params[in->id].burst_read;
1411 out->type = PIPELINE_PORT_IN_RING_MULTI_READER;
1412 out->params.ring_multi.ring = app->swq[in->id];
1413 out->burst_size = swq_params->burst_read;
1416 if (swq_params->ipv4_frag == 1) {
1417 struct rte_port_ring_reader_ipv4_frag_params *params =
1418 &out->params.ring_ipv4_frag;
1420 out->type = PIPELINE_PORT_IN_RING_READER_IPV4_FRAG;
1421 params->ring = app->swq[in->id];
1422 params->mtu = swq_params->mtu;
1423 params->metadata_size = swq_params->metadata_size;
1424 params->pool_direct =
1425 app->mempool[swq_params->mempool_direct_id];
1426 params->pool_indirect =
1427 app->mempool[swq_params->mempool_indirect_id];
1428 out->burst_size = swq_params->burst_read;
1430 struct rte_port_ring_reader_ipv6_frag_params *params =
1431 &out->params.ring_ipv6_frag;
1433 out->type = PIPELINE_PORT_IN_RING_READER_IPV6_FRAG;
1434 params->ring = app->swq[in->id];
1435 params->mtu = swq_params->mtu;
1436 params->metadata_size = swq_params->metadata_size;
1437 params->pool_direct =
1438 app->mempool[swq_params->mempool_direct_id];
1439 params->pool_indirect =
1440 app->mempool[swq_params->mempool_indirect_id];
1441 out->burst_size = swq_params->burst_read;
1446 case APP_PKTQ_IN_TM:
1448 out->type = PIPELINE_PORT_IN_SCHED_READER;
1449 out->params.sched.sched = app->tm[in->id];
1450 out->burst_size = app->tm_params[in->id].burst_read;
1453 #ifdef RTE_EXEC_ENV_LINUXAPP
1454 case APP_PKTQ_IN_TAP:
1456 struct app_pktq_tap_params *tap_params =
1457 &app->tap_params[in->id];
1458 struct app_mempool_params *mempool_params =
1459 &app->mempool_params[tap_params->mempool_id];
1460 struct rte_mempool *mempool =
1461 app->mempool[tap_params->mempool_id];
1463 out->type = PIPELINE_PORT_IN_FD_READER;
1464 out->params.fd.fd = app->tap[in->id];
1465 out->params.fd.mtu = mempool_params->buffer_size;
1466 out->params.fd.mempool = mempool;
1467 out->burst_size = app->tap_params[in->id].burst_read;
1471 #ifdef RTE_LIBRTE_KNI
1472 case APP_PKTQ_IN_KNI:
1474 out->type = PIPELINE_PORT_IN_KNI_READER;
1475 out->params.kni.kni = app->kni[in->id];
1476 out->burst_size = app->kni_params[in->id].burst_read;
1479 #endif /* RTE_LIBRTE_KNI */
1480 case APP_PKTQ_IN_SOURCE:
1482 uint32_t mempool_id =
1483 app->source_params[in->id].mempool_id;
1485 out->type = PIPELINE_PORT_IN_SOURCE;
1486 out->params.source.mempool = app->mempool[mempool_id];
1487 out->burst_size = app->source_params[in->id].burst;
1488 out->params.source.file_name =
1489 app->source_params[in->id].file_name;
1490 out->params.source.n_bytes_per_pkt =
1491 app->source_params[in->id].n_bytes_per_pkt;
1500 p_out->n_ports_out = p_in->n_pktq_out;
1501 for (i = 0; i < p_in->n_pktq_out; i++) {
1502 struct app_pktq_out_params *in = &p_in->pktq_out[i];
1503 struct pipeline_port_out_params *out = &p_out->port_out[i];
1506 case APP_PKTQ_OUT_HWQ:
1508 struct app_pktq_hwq_out_params *p_hwq_out =
1509 &app->hwq_out_params[in->id];
1510 struct app_link_params *p_link =
1511 app_get_link_for_txq(app, p_hwq_out);
1512 uint32_t txq_link_id, txq_queue_id;
1514 sscanf(p_hwq_out->name,
1515 "TXQ%" SCNu32 ".%" SCNu32,
1519 if (p_hwq_out->dropless == 0) {
1520 struct rte_port_ethdev_writer_params *params =
1521 &out->params.ethdev;
1523 out->type = PIPELINE_PORT_OUT_ETHDEV_WRITER;
1524 params->port_id = p_link->pmd_id;
1525 params->queue_id = txq_queue_id;
1526 params->tx_burst_sz =
1527 app->hwq_out_params[in->id].burst;
1529 struct rte_port_ethdev_writer_nodrop_params
1530 *params = &out->params.ethdev_nodrop;
1533 PIPELINE_PORT_OUT_ETHDEV_WRITER_NODROP;
1534 params->port_id = p_link->pmd_id;
1535 params->queue_id = txq_queue_id;
1536 params->tx_burst_sz = p_hwq_out->burst;
1537 params->n_retries = p_hwq_out->n_retries;
1541 case APP_PKTQ_OUT_SWQ:
1543 struct app_pktq_swq_params *swq_params = &app->swq_params[in->id];
1545 if ((swq_params->ipv4_ras == 0) && (swq_params->ipv6_ras == 0)) {
1546 if (app_swq_get_writers(app, swq_params) == 1) {
1547 if (app->swq_params[in->id].dropless == 0) {
1548 struct rte_port_ring_writer_params *params =
1551 out->type = PIPELINE_PORT_OUT_RING_WRITER;
1552 params->ring = app->swq[in->id];
1553 params->tx_burst_sz =
1554 app->swq_params[in->id].burst_write;
1556 struct rte_port_ring_writer_nodrop_params
1557 *params = &out->params.ring_nodrop;
1560 PIPELINE_PORT_OUT_RING_WRITER_NODROP;
1561 params->ring = app->swq[in->id];
1562 params->tx_burst_sz =
1563 app->swq_params[in->id].burst_write;
1565 app->swq_params[in->id].n_retries;
1568 if (swq_params->dropless == 0) {
1569 struct rte_port_ring_multi_writer_params *params =
1570 &out->params.ring_multi;
1572 out->type = PIPELINE_PORT_OUT_RING_MULTI_WRITER;
1573 params->ring = app->swq[in->id];
1574 params->tx_burst_sz = swq_params->burst_write;
1576 struct rte_port_ring_multi_writer_nodrop_params
1577 *params = &out->params.ring_multi_nodrop;
1579 out->type = PIPELINE_PORT_OUT_RING_MULTI_WRITER_NODROP;
1580 params->ring = app->swq[in->id];
1581 params->tx_burst_sz = swq_params->burst_write;
1582 params->n_retries = swq_params->n_retries;
1586 if (swq_params->ipv4_ras == 1) {
1587 struct rte_port_ring_writer_ipv4_ras_params *params =
1588 &out->params.ring_ipv4_ras;
1590 out->type = PIPELINE_PORT_OUT_RING_WRITER_IPV4_RAS;
1591 params->ring = app->swq[in->id];
1592 params->tx_burst_sz = swq_params->burst_write;
1594 struct rte_port_ring_writer_ipv6_ras_params *params =
1595 &out->params.ring_ipv6_ras;
1597 out->type = PIPELINE_PORT_OUT_RING_WRITER_IPV6_RAS;
1598 params->ring = app->swq[in->id];
1599 params->tx_burst_sz = swq_params->burst_write;
1604 case APP_PKTQ_OUT_TM:
1606 struct rte_port_sched_writer_params *params =
1609 out->type = PIPELINE_PORT_OUT_SCHED_WRITER;
1610 params->sched = app->tm[in->id];
1611 params->tx_burst_sz =
1612 app->tm_params[in->id].burst_write;
1615 #ifdef RTE_EXEC_ENV_LINUXAPP
1616 case APP_PKTQ_OUT_TAP:
1618 struct rte_port_fd_writer_params *params =
1621 out->type = PIPELINE_PORT_OUT_FD_WRITER;
1622 params->fd = app->tap[in->id];
1623 params->tx_burst_sz =
1624 app->tap_params[in->id].burst_write;
1628 #ifdef RTE_LIBRTE_KNI
1629 case APP_PKTQ_OUT_KNI:
1631 struct app_pktq_kni_params *p_kni =
1632 &app->kni_params[in->id];
1634 if (p_kni->dropless == 0) {
1635 struct rte_port_kni_writer_params *params =
1638 out->type = PIPELINE_PORT_OUT_KNI_WRITER;
1639 params->kni = app->kni[in->id];
1640 params->tx_burst_sz =
1641 app->kni_params[in->id].burst_write;
1643 struct rte_port_kni_writer_nodrop_params
1644 *params = &out->params.kni_nodrop;
1646 out->type = PIPELINE_PORT_OUT_KNI_WRITER_NODROP;
1647 params->kni = app->kni[in->id];
1648 params->tx_burst_sz =
1649 app->kni_params[in->id].burst_write;
1651 app->kni_params[in->id].n_retries;
1655 #endif /* RTE_LIBRTE_KNI */
1656 case APP_PKTQ_OUT_SINK:
1658 out->type = PIPELINE_PORT_OUT_SINK;
1659 out->params.sink.file_name =
1660 app->sink_params[in->id].file_name;
1661 out->params.sink.max_n_pkts =
1662 app->sink_params[in->id].
1673 p_out->n_msgq = p_in->n_msgq_in;
1675 for (i = 0; i < p_in->n_msgq_in; i++)
1676 p_out->msgq_in[i] = app->msgq[p_in->msgq_in[i]];
1678 for (i = 0; i < p_in->n_msgq_out; i++)
1679 p_out->msgq_out[i] = app->msgq[p_in->msgq_out[i]];
1682 p_out->n_args = p_in->n_args;
1683 for (i = 0; i < p_in->n_args; i++) {
1684 p_out->args_name[i] = p_in->args_name[i];
1685 p_out->args_value[i] = p_in->args_value[i];
/*
 * Instantiate every pipeline configured in app->pipeline_params.
 * For each pipeline: resolve its registered pipeline type, build the
 * generic pipeline parameter set, invoke the back-end and front-end
 * init hooks (either hook may be absent), and pre-compute the pipeline
 * timer period in TSC cycles. Any failure is fatal (rte_panic).
 */
1690 app_init_pipelines(struct app_params *app)
1694 for (p_id = 0; p_id < app->n_pipelines; p_id++) {
1695 struct app_pipeline_params *params =
1696 &app->pipeline_params[p_id];
1697 struct app_pipeline_data *data = &app->pipeline_data[p_id];
1698 struct pipeline_type *ptype;
1699 struct pipeline_params pp;
1701 APP_LOG(app, HIGH, "Initializing %s ...", params->name);
/* Look up the type registered via app_pipeline_type_register(); fatal if unknown. */
1703 ptype = app_pipeline_type_find(app, params->type);
1705 rte_panic("Init error: Unknown pipeline type \"%s\"\n",
/* Translate application-level queue config into generic pipeline params. */
1708 app_pipeline_params_get(app, params, &pp);
/* Back-end init is optional per type; a NULL result is a fatal init error. */
1712 if (ptype->be_ops->f_init) {
1713 data->be = ptype->be_ops->f_init(&pp, (void *) app);
1715 if (data->be == NULL)
1716 rte_panic("Pipeline instance \"%s\" back-end "
1717 "init error\n", params->name);
/* Front-end init is likewise optional; NULL is fatal. */
1722 if (ptype->fe_ops->f_init) {
1723 data->fe = ptype->fe_ops->f_init(&pp, (void *) app);
1725 if (data->fe == NULL)
1726 rte_panic("Pipeline instance \"%s\" front-end "
1727 "init error\n", params->name);
1730 data->ptype = ptype;
/* Convert the configured period to TSC cycles; the /100 suggests
 * timer_period is expressed in hundredths of a second — TODO confirm
 * against the config parser. */
1732 data->timer_period = (rte_get_tsc_hz() *
1733 params->timer_period) / 100;
/*
 * Run the optional front-end post-init hook of every pipeline, after all
 * pipelines have completed their initial init. A non-zero status from any
 * hook is a fatal error (rte_panic).
 */
1738 app_post_init_pipelines(struct app_params *app)
1742 for (p_id = 0; p_id < app->n_pipelines; p_id++) {
1743 struct app_pipeline_params *params =
1744 &app->pipeline_params[p_id];
1745 struct app_pipeline_data *data = &app->pipeline_data[p_id];
/* Hook is optional — skip pipelines whose type does not define it. */
1748 if (data->ptype->fe_ops->f_post_init == NULL)
1751 status = data->ptype->fe_ops->f_post_init(data->fe);
1753 rte_panic("Pipeline instance \"%s\" front-end "
1754 "post-init error\n", params->name);
/*
 * Assign every initialized pipeline to its worker thread (lcore).
 * For each pipeline: map its (socket, core, hyper-thread) placement to an
 * lcore id, initialize that thread's timer/headroom bookkeeping and message
 * queues, then append the pipeline to the thread's "regular" list (type has
 * no custom f_run) or "custom" list (type provides its own f_run).
 * Invalid core placement or missing MSGQs are fatal (rte_panic).
 */
1759 app_init_threads(struct app_params *app)
1761 uint64_t time = rte_get_tsc_cycles();
1764 for (p_id = 0; p_id < app->n_pipelines; p_id++) {
1765 struct app_pipeline_params *params =
1766 &app->pipeline_params[p_id];
1767 struct app_pipeline_data *data = &app->pipeline_data[p_id];
1768 struct pipeline_type *ptype;
1769 struct app_thread_data *t;
1770 struct app_thread_pipeline_data *p;
/* Resolve the configured socket/core/HT triple to an lcore id. */
1773 lcore_id = cpu_core_map_get_lcore_id(app->core_map,
1776 params->hyper_th_id);
1779 rte_panic("Invalid core s%" PRIu32 "c%" PRIu32 "%s\n",
1782 (params->hyper_th_id) ? "h" : "");
1784 t = &app->thread_data[lcore_id];
/* Thread timer period: APP_THREAD_TIMER_PERIOD appears to be in
 * milliseconds given the /1000 — TODO confirm against its definition. */
1786 t->timer_period = (rte_get_tsc_hz() * APP_THREAD_TIMER_PERIOD) / 1000;
1787 t->thread_req_deadline = time + t->timer_period;
/* Reset headroom (idle-cycle) accounting for this thread. */
1789 t->headroom_cycles = 0;
1790 t->headroom_time = rte_get_tsc_cycles();
1791 t->headroom_ratio = 0.0;
/* Each worker thread needs a request (in) and response (out) MSGQ. */
1793 t->msgq_in = app_thread_msgq_in_get(app,
1796 params->hyper_th_id);
1797 if (t->msgq_in == NULL)
1798 rte_panic("Init error: Cannot find MSGQ_IN for thread %" PRId32,
1801 t->msgq_out = app_thread_msgq_out_get(app,
1804 params->hyper_th_id);
1805 if (t->msgq_out == NULL)
1806 rte_panic("Init error: Cannot find MSGQ_OUT for thread %" PRId32,
1809 ptype = app_pipeline_type_find(app, params->type);
1811 rte_panic("Init error: Unknown pipeline "
1812 "type \"%s\"\n", params->type);
/* Pipelines without a custom run callback go on the "regular" list
 * (driven by the generic run loop); others go on the "custom" list. */
1814 p = (ptype->be_ops->f_run == NULL) ?
1815 &t->regular[t->n_regular] :
1816 &t->custom[t->n_custom];
1818 p->pipeline_id = p_id;
1820 p->f_run = ptype->be_ops->f_run;
1821 p->f_timer = ptype->be_ops->f_timer;
1822 p->timer_period = data->timer_period;
/* Stagger the first timer expiry one full period from "now". */
1823 p->deadline = time + data->timer_period;
1827 if (ptype->be_ops->f_run == NULL)
/*
 * Top-level application init: build the core map/mask and mempools,
 * register the global CLI commands and the built-in pipeline types, then
 * instantiate all pipelines and bind them to their worker threads.
 * Ordering matters: types must be registered before app_init_pipelines()
 * can resolve them, and pipelines before app_init_threads() can place them.
 */
1834 int app_init(struct app_params *app)
1836 app_init_core_map(app);
1837 app_init_core_mask(app);
1840 app_init_mempool(app);
/* Global (non-pipeline-specific) CLI command sets. */
1848 app_pipeline_common_cmd_push(app);
1849 app_pipeline_thread_cmd_push(app);
/* Built-in pipeline types; each registration also pushes its CLI commands. */
1850 app_pipeline_type_register(app, &pipeline_master);
1851 app_pipeline_type_register(app, &pipeline_passthrough);
1852 app_pipeline_type_register(app, &pipeline_flow_classification);
1853 app_pipeline_type_register(app, &pipeline_flow_actions);
1854 app_pipeline_type_register(app, &pipeline_firewall);
1855 app_pipeline_type_register(app, &pipeline_routing);
1857 app_init_pipelines(app);
1858 app_init_threads(app);
/* Second init phase, run after app_init(): executes each pipeline type's
 * optional front-end post-init hook. */
1863 int app_post_init(struct app_params *app)
1865 app_post_init_pipelines(app);
/*
 * Append a pipeline type's CLI command table to the application-wide
 * command array (app->cmds). Each copied command gets its 'data' back-
 * pointer set to the app instance, and the array is kept NULL-terminated
 * as required by the cmdline library. Fails (early return — elided here)
 * when arguments are invalid or APP_MAX_CMDS slots would be exceeded.
 */
1871 app_pipeline_type_cmd_push(struct app_params *app,
1872 struct pipeline_type *ptype)
1874 cmdline_parse_ctx_t *cmds;
1877 /* Check input arguments */
1878 if ((app == NULL) ||
1882 n_cmds = pipeline_type_cmds_count(ptype);
1886 cmds = ptype->fe_ops->cmds;
1888 /* Check for available slots in the application commands array */
1889 if (n_cmds > APP_MAX_CMDS - app->n_cmds)
1892 /* Push pipeline commands into the application */
1893 memcpy(&app->cmds[app->n_cmds],
1895 n_cmds * sizeof(cmdline_parse_ctx_t));
/* Point every newly-copied command back at the app context. */
1897 for (i = 0; i < n_cmds; i++)
1898 app->cmds[app->n_cmds + i]->data = app;
/* Keep the command array NULL-terminated after the append. */
1900 app->n_cmds += n_cmds;
1901 app->cmds[app->n_cmds] = NULL;
/*
 * Register a pipeline type with the application. Validates the type
 * descriptor (name present, mandatory back-end f_init/f_timer hooks set),
 * rejects duplicate names, checks capacity in both the type table and the
 * CLI command array, then copies the descriptor and pushes its CLI
 * commands via app_pipeline_type_cmd_push().
 */
1907 app_pipeline_type_register(struct app_params *app, struct pipeline_type *ptype)
1911 /* Check input arguments */
1912 if ((app == NULL) ||
1914 (ptype->name == NULL) ||
1915 (strlen(ptype->name) == 0) ||
1916 (ptype->be_ops->f_init == NULL) ||
1917 (ptype->be_ops->f_timer == NULL))
1920 /* Check for duplicate entry */
1921 for (i = 0; i < app->n_pipeline_types; i++)
1922 if (strcmp(app->pipeline_type[i].name, ptype->name) == 0)
1925 /* Check for resource availability */
1926 n_cmds = pipeline_type_cmds_count(ptype);
1927 if ((app->n_pipeline_types == APP_MAX_PIPELINE_TYPES) ||
1928 (n_cmds > APP_MAX_CMDS - app->n_cmds))
1931 /* Copy pipeline type */
/* Struct copy by value; the caller's descriptor is not retained. */
1932 memcpy(&app->pipeline_type[app->n_pipeline_types++],
1934 sizeof(struct pipeline_type));
1936 /* Copy CLI commands */
1938 app_pipeline_type_cmd_push(app, ptype);
1944 pipeline_type *app_pipeline_type_find(struct app_params *app, char *name)
1948 for (i = 0; i < app->n_pipeline_types; i++)
1949 if (strcmp(app->pipeline_type[i].name, name) == 0)
1950 return &app->pipeline_type[i];