/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <netinet/in.h>
#ifdef RTE_EXEC_ENV_LINUXAPP
#include <linux/if_tun.h>
#endif
#include <sys/ioctl.h>

#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_malloc.h>

#include "pipeline_common_fe.h"
#include "pipeline_master.h"
#include "pipeline_passthrough.h"
#include "pipeline_firewall.h"
#include "pipeline_flow_classification.h"
#include "pipeline_flow_actions.h"
#include "pipeline_routing.h"
#include "thread_fe.h"

#define APP_NAME_SIZE 32

#define APP_RETA_SIZE_MAX (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
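
/*
 * CPU core map initialization. The cpu_core_map_init() arguments below
 * (4, 32, 4, 0) appear to bound the detected topology (sockets, cores per
 * socket, hyper-threads per core); the map is later used to translate
 * (socket, core, hyper-thread) tuples from the configuration into lcore IDs.
 */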
app_init_core_map(struct app_params *app)
APP_LOG(app, HIGH, "Initializing CPU core map ...");
app->core_map = cpu_core_map_init(4, 32, 4, 0);
if (app->core_map == NULL)
rte_panic("Cannot create CPU core map\n");
if (app->log_level >= APP_LOG_LEVEL_LOW)
cpu_core_map_print(app->core_map);

/* Core Mask String in Hex Representation */
#define APP_CORE_MASK_STRING_SIZE ((64 * APP_CORE_MASK_SIZE) / 8 * 2 + 1)
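/*
 * Sizing note: each 64-bit mask word covers 64 lcores and prints as
 * (64 / 8) * 2 = 16 hex characters, so APP_CORE_MASK_SIZE words need
 * 16 * APP_CORE_MASK_SIZE characters plus one byte for the terminating NUL.
 */
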
app_init_core_mask(struct app_params *app)
char core_mask_str[APP_CORE_MASK_STRING_SIZE];
for (i = 0; i < app->n_pipelines; i++) {
struct app_pipeline_params *p = &app->pipeline_params[i];
lcore_id = cpu_core_map_get_lcore_id(app->core_map,
rte_panic("Cannot create CPU core mask\n");
app_core_enable_in_core_mask(app, lcore_id);
app_core_build_core_mask_string(app, core_mask_str);
APP_LOG(app, HIGH, "CPU core mask = 0x%s", core_mask_str);
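
/*
 * EAL initialization. The parsed [EAL] section of the configuration is
 * turned back into an argv-style vector (app->eal_argv) and handed to
 * rte_eal_init(). As an illustration only, a configuration with four memory
 * channels and no optional parameters would produce something close to:
 * "<app_name> -c<core_mask> -n4 --".
 */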
app_init_eal(struct app_params *app)
char core_mask_str[APP_CORE_MASK_STRING_SIZE];
struct app_eal_params *p = &app->eal_params;
app->eal_argv[n_args++] = strdup(app->app_name);
app_core_build_core_mask_string(app, core_mask_str);
snprintf(buffer, sizeof(buffer), "-c%s", core_mask_str);
app->eal_argv[n_args++] = strdup(buffer);
snprintf(buffer, sizeof(buffer), "--lcores=%s", p->coremap);
app->eal_argv[n_args++] = strdup(buffer);
if (p->master_lcore_present) {
"--master-lcore=%" PRIu32,
app->eal_argv[n_args++] = strdup(buffer);
snprintf(buffer, sizeof(buffer), "-n%" PRIu32, p->channels);
app->eal_argv[n_args++] = strdup(buffer);
if (p->memory_present) {
snprintf(buffer, sizeof(buffer), "-m%" PRIu32, p->memory);
app->eal_argv[n_args++] = strdup(buffer);
if (p->ranks_present) {
snprintf(buffer, sizeof(buffer), "-r%" PRIu32, p->ranks);
app->eal_argv[n_args++] = strdup(buffer);
for (i = 0; i < APP_MAX_LINKS; i++) {
if (p->pci_blacklist[i] == NULL)
"--pci-blacklist=%s",
p->pci_blacklist[i]);
app->eal_argv[n_args++] = strdup(buffer);
if (app->port_mask != 0)
for (i = 0; i < APP_MAX_LINKS; i++) {
if (p->pci_whitelist[i] == NULL)
"--pci-whitelist=%s",
p->pci_whitelist[i]);
app->eal_argv[n_args++] = strdup(buffer);
for (i = 0; i < app->n_links; i++) {
char *pci_bdf = app->link_params[i].pci_bdf;
"--pci-whitelist=%s",
app->eal_argv[n_args++] = strdup(buffer);
for (i = 0; i < APP_MAX_LINKS; i++) {
if (p->vdev[i] == NULL)
app->eal_argv[n_args++] = strdup(buffer);
if ((p->vmware_tsc_map_present) && p->vmware_tsc_map) {
snprintf(buffer, sizeof(buffer), "--vmware-tsc-map");
app->eal_argv[n_args++] = strdup(buffer);
app->eal_argv[n_args++] = strdup(buffer);
snprintf(buffer, sizeof(buffer), "--syslog=%s", p->syslog);
app->eal_argv[n_args++] = strdup(buffer);
if (p->log_level_present) {
"--log-level=%" PRIu32,
app->eal_argv[n_args++] = strdup(buffer);
if ((p->version_present) && p->version) {
snprintf(buffer, sizeof(buffer), "-v");
app->eal_argv[n_args++] = strdup(buffer);
if ((p->help_present) && p->help) {
snprintf(buffer, sizeof(buffer), "--help");
app->eal_argv[n_args++] = strdup(buffer);
if ((p->no_huge_present) && p->no_huge) {
snprintf(buffer, sizeof(buffer), "--no-huge");
app->eal_argv[n_args++] = strdup(buffer);
if ((p->no_pci_present) && p->no_pci) {
snprintf(buffer, sizeof(buffer), "--no-pci");
app->eal_argv[n_args++] = strdup(buffer);
if ((p->no_hpet_present) && p->no_hpet) {
snprintf(buffer, sizeof(buffer), "--no-hpet");
app->eal_argv[n_args++] = strdup(buffer);
if ((p->no_shconf_present) && p->no_shconf) {
snprintf(buffer, sizeof(buffer), "--no-shconf");
app->eal_argv[n_args++] = strdup(buffer);
snprintf(buffer, sizeof(buffer), "-d%s", p->add_driver);
app->eal_argv[n_args++] = strdup(buffer);
app->eal_argv[n_args++] = strdup(buffer);
snprintf(buffer, sizeof(buffer), "--huge-dir=%s", p->huge_dir);
app->eal_argv[n_args++] = strdup(buffer);
if (p->file_prefix) {
app->eal_argv[n_args++] = strdup(buffer);
if (p->base_virtaddr) {
"--base-virtaddr=%s",
app->eal_argv[n_args++] = strdup(buffer);
if ((p->create_uio_dev_present) && p->create_uio_dev) {
snprintf(buffer, sizeof(buffer), "--create-uio-dev");
app->eal_argv[n_args++] = strdup(buffer);
app->eal_argv[n_args++] = strdup(buffer);
if ((p->xen_dom0_present) && (p->xen_dom0)) {
snprintf(buffer, sizeof(buffer), "--xen-dom0");
app->eal_argv[n_args++] = strdup(buffer);
snprintf(buffer, sizeof(buffer), "--");
app->eal_argv[n_args++] = strdup(buffer);
app->eal_argc = n_args;
APP_LOG(app, HIGH, "Initializing EAL ...");
if (app->log_level >= APP_LOG_LEVEL_LOW) {
fprintf(stdout, "[APP] EAL arguments: \"");
for (i = 1; i < app->eal_argc; i++)
fprintf(stdout, "%s ", app->eal_argv[i]);
fprintf(stdout, "\"\n");
status = rte_eal_init(app->eal_argc, app->eal_argv);
rte_panic("EAL init error\n");
app_init_mempool(struct app_params *app)
for (i = 0; i < app->n_mempools; i++) {
struct app_mempool_params *p = &app->mempool_params[i];
APP_LOG(app, HIGH, "Initializing %s ...", p->name);
app->mempool[i] = rte_mempool_create(
sizeof(struct rte_pktmbuf_pool_private),
rte_pktmbuf_pool_init, NULL,
rte_pktmbuf_init, NULL,
if (app->mempool[i] == NULL)
rte_panic("%s init error\n", p->name);
app_link_filter_arp_add(struct app_link_params *link)
struct rte_eth_ethertype_filter filter = {
.ether_type = ETHER_TYPE_ARP,
.queue = link->arp_q,
return rte_eth_dev_filter_ctrl(link->pmd_id,
RTE_ETH_FILTER_ETHERTYPE,

app_link_filter_tcp_syn_add(struct app_link_params *link)
struct rte_eth_syn_filter filter = {
.queue = link->tcp_syn_q,
return rte_eth_dev_filter_ctrl(link->pmd_id,

app_link_filter_ip_add(struct app_link_params *l1, struct app_link_params *l2)
struct rte_eth_ntuple_filter filter = {
.flags = RTE_5TUPLE_FLAGS,
.dst_ip = rte_bswap32(l2->ip),
.dst_ip_mask = UINT32_MAX, /* Enable */
.src_ip_mask = 0, /* Disable */
.dst_port_mask = 0, /* Disable */
.src_port_mask = 0, /* Disable */
.proto_mask = 0, /* Disable */
.priority = 1, /* Lowest */
.queue = l1->ip_local_q,
return rte_eth_dev_filter_ctrl(l1->pmd_id,
RTE_ETH_FILTER_NTUPLE,

app_link_filter_ip_del(struct app_link_params *l1, struct app_link_params *l2)
struct rte_eth_ntuple_filter filter = {
.flags = RTE_5TUPLE_FLAGS,
.dst_ip = rte_bswap32(l2->ip),
.dst_ip_mask = UINT32_MAX, /* Enable */
.src_ip_mask = 0, /* Disable */
.dst_port_mask = 0, /* Disable */
.src_port_mask = 0, /* Disable */
.proto_mask = 0, /* Disable */
.priority = 1, /* Lowest */
.queue = l1->ip_local_q,
return rte_eth_dev_filter_ctrl(l1->pmd_id,
RTE_ETH_FILTER_NTUPLE,
RTE_ETH_FILTER_DELETE,

app_link_filter_tcp_add(struct app_link_params *l1, struct app_link_params *l2)
struct rte_eth_ntuple_filter filter = {
.flags = RTE_5TUPLE_FLAGS,
.dst_ip = rte_bswap32(l2->ip),
.dst_ip_mask = UINT32_MAX, /* Enable */
.src_ip_mask = 0, /* Disable */
.dst_port_mask = 0, /* Disable */
.src_port_mask = 0, /* Disable */
.proto = IPPROTO_TCP,
.proto_mask = UINT8_MAX, /* Enable */
.priority = 2, /* Higher priority than IP */
.queue = l1->tcp_local_q,
return rte_eth_dev_filter_ctrl(l1->pmd_id,
RTE_ETH_FILTER_NTUPLE,

app_link_filter_tcp_del(struct app_link_params *l1, struct app_link_params *l2)
struct rte_eth_ntuple_filter filter = {
.flags = RTE_5TUPLE_FLAGS,
.dst_ip = rte_bswap32(l2->ip),
.dst_ip_mask = UINT32_MAX, /* Enable */
.src_ip_mask = 0, /* Disable */
.dst_port_mask = 0, /* Disable */
.src_port_mask = 0, /* Disable */
.proto = IPPROTO_TCP,
.proto_mask = UINT8_MAX, /* Enable */
.priority = 2, /* Higher priority than IP */
.queue = l1->tcp_local_q,
return rte_eth_dev_filter_ctrl(l1->pmd_id,
RTE_ETH_FILTER_NTUPLE,
RTE_ETH_FILTER_DELETE,

app_link_filter_udp_add(struct app_link_params *l1, struct app_link_params *l2)
struct rte_eth_ntuple_filter filter = {
.flags = RTE_5TUPLE_FLAGS,
.dst_ip = rte_bswap32(l2->ip),
.dst_ip_mask = UINT32_MAX, /* Enable */
.src_ip_mask = 0, /* Disable */
.dst_port_mask = 0, /* Disable */
.src_port_mask = 0, /* Disable */
.proto = IPPROTO_UDP,
.proto_mask = UINT8_MAX, /* Enable */
.priority = 2, /* Higher priority than IP */
.queue = l1->udp_local_q,
return rte_eth_dev_filter_ctrl(l1->pmd_id,
RTE_ETH_FILTER_NTUPLE,

app_link_filter_udp_del(struct app_link_params *l1, struct app_link_params *l2)
struct rte_eth_ntuple_filter filter = {
.flags = RTE_5TUPLE_FLAGS,
.dst_ip = rte_bswap32(l2->ip),
.dst_ip_mask = UINT32_MAX, /* Enable */
.src_ip_mask = 0, /* Disable */
.dst_port_mask = 0, /* Disable */
.src_port_mask = 0, /* Disable */
.proto = IPPROTO_UDP,
.proto_mask = UINT8_MAX, /* Enable */
.priority = 2, /* Higher priority than IP */
.queue = l1->udp_local_q,
return rte_eth_dev_filter_ctrl(l1->pmd_id,
RTE_ETH_FILTER_NTUPLE,
RTE_ETH_FILTER_DELETE,

app_link_filter_sctp_add(struct app_link_params *l1, struct app_link_params *l2)
struct rte_eth_ntuple_filter filter = {
.flags = RTE_5TUPLE_FLAGS,
.dst_ip = rte_bswap32(l2->ip),
.dst_ip_mask = UINT32_MAX, /* Enable */
.src_ip_mask = 0, /* Disable */
.dst_port_mask = 0, /* Disable */
.src_port_mask = 0, /* Disable */
.proto = IPPROTO_SCTP,
.proto_mask = UINT8_MAX, /* Enable */
.priority = 2, /* Higher priority than IP */
.queue = l1->sctp_local_q,
return rte_eth_dev_filter_ctrl(l1->pmd_id,
RTE_ETH_FILTER_NTUPLE,

app_link_filter_sctp_del(struct app_link_params *l1, struct app_link_params *l2)
struct rte_eth_ntuple_filter filter = {
.flags = RTE_5TUPLE_FLAGS,
.dst_ip = rte_bswap32(l2->ip),
.dst_ip_mask = UINT32_MAX, /* Enable */
.src_ip_mask = 0, /* Disable */
.dst_port_mask = 0, /* Disable */
.src_port_mask = 0, /* Disable */
.proto = IPPROTO_SCTP,
.proto_mask = UINT8_MAX, /* Enable */
.priority = 2, /* Higher priority than IP */
.queue = l1->sctp_local_q,
return rte_eth_dev_filter_ctrl(l1->pmd_id,
RTE_ETH_FILTER_NTUPLE,
RTE_ETH_FILTER_DELETE,

app_link_set_arp_filter(struct app_params *app, struct app_link_params *cp)
if (cp->arp_q != 0) {
int status = app_link_filter_arp_add(cp);
APP_LOG(app, LOW, "%s (%" PRIu32 "): "
"Adding ARP filter (queue = %" PRIu32 ")",
cp->name, cp->pmd_id, cp->arp_q);
rte_panic("%s (%" PRIu32 "): "
"Error adding ARP filter "
"(queue = %" PRIu32 ") (%" PRId32 ")\n",
cp->name, cp->pmd_id, cp->arp_q, status);

app_link_set_tcp_syn_filter(struct app_params *app, struct app_link_params *cp)
if (cp->tcp_syn_q != 0) {
int status = app_link_filter_tcp_syn_add(cp);
APP_LOG(app, LOW, "%s (%" PRIu32 "): "
"Adding TCP SYN filter (queue = %" PRIu32 ")",
cp->name, cp->pmd_id, cp->tcp_syn_q);
rte_panic("%s (%" PRIu32 "): "
"Error adding TCP SYN filter "
"(queue = %" PRIu32 ") (%" PRId32 ")\n",
cp->name, cp->pmd_id, cp->tcp_syn_q,
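
/*
 * Bringing a link up: for every link that has a local IP queue configured,
 * the IP/TCP/UDP/SCTP filters matching this link's IP address are installed,
 * then the PMD is asked to set the link up and the link is marked as UP.
 */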
app_link_up_internal(struct app_params *app, struct app_link_params *cp)
/* For each link, add filters for IP of current link */
for (i = 0; i < app->n_links; i++) {
struct app_link_params *p = &app->link_params[i];
if (p->ip_local_q != 0) {
int status = app_link_filter_ip_add(p, cp);
APP_LOG(app, LOW, "%s (%" PRIu32 "): "
"Adding IP filter (queue= %" PRIu32
", IP = 0x%08" PRIx32 ")",
p->name, p->pmd_id, p->ip_local_q,
rte_panic("%s (%" PRIu32 "): "
"filter (queue= %" PRIu32 ", "
p->ip_local_q, cp->ip, status);
if (p->tcp_local_q != 0) {
int status = app_link_filter_tcp_add(p, cp);
APP_LOG(app, LOW, "%s (%" PRIu32 "): "
", IP = 0x%08" PRIx32 ")",
p->name, p->pmd_id, p->tcp_local_q,
rte_panic("%s (%" PRIu32 "): "
"filter (queue = %" PRIu32 ", "
p->tcp_local_q, cp->ip, status);
if (p->udp_local_q != 0) {
int status = app_link_filter_udp_add(p, cp);
APP_LOG(app, LOW, "%s (%" PRIu32 "): "
", IP = 0x%08" PRIx32 ")",
p->name, p->pmd_id, p->udp_local_q,
rte_panic("%s (%" PRIu32 "): "
"filter (queue = %" PRIu32 ", "
p->udp_local_q, cp->ip, status);
if (p->sctp_local_q != 0) {
int status = app_link_filter_sctp_add(p, cp);
APP_LOG(app, LOW, "%s (%" PRIu32
"): Adding SCTP filter "
", IP = 0x%08" PRIx32 ")",
p->name, p->pmd_id, p->sctp_local_q,
rte_panic("%s (%" PRIu32 "): "
"filter (queue = %" PRIu32 ", "
p->sctp_local_q, cp->ip,
status = rte_eth_dev_set_link_up(cp->pmd_id);
rte_panic("%s (%" PRIu32 "): PMD set link up error %"
PRId32 "\n", cp->name, cp->pmd_id, status);
/* Mark link as UP */
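
/*
 * Bringing a link down: the PMD link is set down, the link is marked as DOWN,
 * and (when the link has a valid IP) the per-protocol filters that were
 * installed for this link's IP address are removed from every link.
 */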
app_link_down_internal(struct app_params *app, struct app_link_params *cp)
status = rte_eth_dev_set_link_down(cp->pmd_id);
rte_panic("%s (%" PRIu32 "): PMD set link down error %"
PRId32 "\n", cp->name, cp->pmd_id, status);
/* Mark link as DOWN */
/* Return if current link IP is not valid */
/* For each link, remove filters for IP of current link */
for (i = 0; i < app->n_links; i++) {
struct app_link_params *p = &app->link_params[i];
if (p->ip_local_q != 0) {
int status = app_link_filter_ip_del(p, cp);
APP_LOG(app, LOW, "%s (%" PRIu32
"): Deleting IP filter "
"(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")",
p->name, p->pmd_id, p->ip_local_q, cp->ip);
rte_panic("%s (%" PRIu32
"): Error deleting IP filter "
p->name, p->pmd_id, p->ip_local_q,
if (p->tcp_local_q != 0) {
int status = app_link_filter_tcp_del(p, cp);
APP_LOG(app, LOW, "%s (%" PRIu32
"): Deleting TCP filter "
", IP = 0x%" PRIx32 ")",
p->name, p->pmd_id, p->tcp_local_q, cp->ip);
rte_panic("%s (%" PRIu32
"): Error deleting TCP filter "
p->name, p->pmd_id, p->tcp_local_q,
if (p->udp_local_q != 0) {
int status = app_link_filter_udp_del(p, cp);
APP_LOG(app, LOW, "%s (%" PRIu32
"): Deleting UDP filter "
"(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")",
p->name, p->pmd_id, p->udp_local_q, cp->ip);
rte_panic("%s (%" PRIu32
"): Error deleting UDP filter "
p->name, p->pmd_id, p->udp_local_q,
if (p->sctp_local_q != 0) {
int status = app_link_filter_sctp_del(p, cp);
APP_LOG(app, LOW, "%s (%" PRIu32
"): Deleting SCTP filter "
", IP = 0x%" PRIx32 ")",
p->name, p->pmd_id, p->sctp_local_q, cp->ip);
rte_panic("%s (%" PRIu32
"): Error deleting SCTP filter "
p->name, p->pmd_id, p->sctp_local_q,

app_check_link(struct app_params *app)
uint32_t all_links_up, i;
for (i = 0; i < app->n_links; i++) {
struct app_link_params *p = &app->link_params[i];
struct rte_eth_link link_params;
memset(&link_params, 0, sizeof(link_params));
rte_eth_link_get(p->pmd_id, &link_params);
APP_LOG(app, HIGH, "%s (%" PRIu32 ") (%" PRIu32 " Gbps) %s",
link_params.link_speed / 1000,
link_params.link_status ? "UP" : "DOWN");
if (link_params.link_status == ETH_LINK_DOWN)
if (all_links_up == 0)
rte_panic("Some links are DOWN\n");

is_any_swq_frag_or_ras(struct app_params *app)
for (i = 0; i < app->n_pktq_swq; i++) {
struct app_pktq_swq_params *p = &app->swq_params[i];
if ((p->ipv4_frag == 1) || (p->ipv6_frag == 1) ||
(p->ipv4_ras == 1) || (p->ipv6_ras == 1))
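
/*
 * If any software queue performs IPv4/IPv6 fragmentation or reassembly, the
 * transmitted mbufs may be multi-segment, so the ETH_TXQ_FLAGS_NOMULTSEGS
 * optimization flag is cleared on every hardware TX queue below.
 */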
app_init_link_frag_ras(struct app_params *app)
if (is_any_swq_frag_or_ras(app)) {
for (i = 0; i < app->n_pktq_hwq_out; i++) {
struct app_pktq_hwq_out_params *p_txq = &app->hwq_out_params[i];
p_txq->conf.txq_flags &= ~ETH_TXQ_FLAGS_NOMULTSEGS;

app_get_cpu_socket_id(uint32_t pmd_id)
int status = rte_eth_dev_socket_id(pmd_id);
return (status != SOCKET_ID_ANY) ? status : 0;

app_link_rss_enabled(struct app_link_params *cp)
return (cp->n_rss_qs) ? 1 : 0;
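
/*
 * RSS redirection table (RETA) setup. The RETA entries are filled round-robin
 * with the queues listed in rss_qs. As an illustration only: with
 * n_rss_qs = 2 (queues 0 and 2) and a RETA size of 128, the entries would
 * read 0, 2, 0, 2, ... so the NIC spreads flows evenly over the two RX
 * queues.
 */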
app_link_rss_setup(struct app_link_params *cp)
struct rte_eth_dev_info dev_info;
struct rte_eth_rss_reta_entry64 reta_conf[APP_RETA_SIZE_MAX];
memset(&dev_info, 0, sizeof(dev_info));
rte_eth_dev_info_get(cp->pmd_id, &dev_info);
if (dev_info.reta_size == 0)
rte_panic("%s (%u): RSS setup error (null RETA size)\n",
cp->name, cp->pmd_id);
if (dev_info.reta_size > ETH_RSS_RETA_SIZE_512)
rte_panic("%s (%u): RSS setup error (RETA size too big)\n",
cp->name, cp->pmd_id);
/* Setup RETA contents */
memset(reta_conf, 0, sizeof(reta_conf));
for (i = 0; i < dev_info.reta_size; i++)
reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
for (i = 0; i < dev_info.reta_size; i++) {
uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
uint32_t rss_qs_pos = i % cp->n_rss_qs;
reta_conf[reta_id].reta[reta_pos] =
(uint16_t) cp->rss_qs[rss_qs_pos];
status = rte_eth_dev_rss_reta_update(cp->pmd_id,
rte_panic("%s (%u): RSS setup error (RETA update failed)\n",
cp->name, cp->pmd_id);

app_init_link_set_config(struct app_link_params *p)
p->conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
p->conf.rx_adv_conf.rss_conf.rss_hf = p->rss_proto_ipv4 |
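
/*
 * Per-link initialization: configure the device with the requested number of
 * RX/TX queues, read the MAC address, optionally enable promiscuous mode, set
 * up each RXQ/TXQ that belongs to the link (matched by the "RXQx.y"/"TXQx.y"
 * names), start the device, install the ARP/TCP SYN filters, program RSS if
 * enabled and finally bring the link up.
 */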
app_init_link(struct app_params *app)
app_init_link_frag_ras(app);
for (i = 0; i < app->n_links; i++) {
struct app_link_params *p_link = &app->link_params[i];
uint32_t link_id, n_hwq_in, n_hwq_out, j;
sscanf(p_link->name, "LINK%" PRIu32, &link_id);
n_hwq_in = app_link_get_n_rxq(app, p_link);
n_hwq_out = app_link_get_n_txq(app, p_link);
app_init_link_set_config(p_link);
APP_LOG(app, HIGH, "Initializing %s (%" PRIu32") "
"(%" PRIu32 " RXQ, %" PRIu32 " TXQ) ...",
status = rte_eth_dev_configure(
rte_panic("%s (%" PRId32 "): "
"init error (%" PRId32 ")\n",
p_link->name, p_link->pmd_id, status);
rte_eth_macaddr_get(p_link->pmd_id,
(struct ether_addr *) &p_link->mac_addr);
rte_eth_promiscuous_enable(p_link->pmd_id);
for (j = 0; j < app->n_pktq_hwq_in; j++) {
struct app_pktq_hwq_in_params *p_rxq =
&app->hwq_in_params[j];
uint32_t rxq_link_id, rxq_queue_id;
sscanf(p_rxq->name, "RXQ%" PRIu32 ".%" PRIu32,
&rxq_link_id, &rxq_queue_id);
if (rxq_link_id != link_id)
status = rte_eth_rx_queue_setup(
app_get_cpu_socket_id(p_link->pmd_id),
app->mempool[p_rxq->mempool_id]);
rte_panic("%s (%" PRIu32 "): "
"%s init error (%" PRId32 ")\n",
for (j = 0; j < app->n_pktq_hwq_out; j++) {
struct app_pktq_hwq_out_params *p_txq =
&app->hwq_out_params[j];
uint32_t txq_link_id, txq_queue_id;
sscanf(p_txq->name, "TXQ%" PRIu32 ".%" PRIu32,
&txq_link_id, &txq_queue_id);
if (txq_link_id != link_id)
status = rte_eth_tx_queue_setup(
app_get_cpu_socket_id(p_link->pmd_id),
rte_panic("%s (%" PRIu32 "): "
"%s init error (%" PRId32 ")\n",
status = rte_eth_dev_start(p_link->pmd_id);
rte_panic("Cannot start %s (error %" PRId32 ")\n",
p_link->name, status);
app_link_set_arp_filter(app, p_link);
app_link_set_tcp_syn_filter(app, p_link);
if (app_link_rss_enabled(p_link))
app_link_rss_setup(p_link);
app_link_up_internal(app, p_link);
app_check_link(app);
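
/*
 * Software queues are plain rte_ring objects. When a queue has exactly one
 * reader (or writer), the single-consumer (or single-producer) ring flag is
 * set so the faster non-atomic dequeue (or enqueue) path can be used.
 */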
app_init_swq(struct app_params *app)
for (i = 0; i < app->n_pktq_swq; i++) {
struct app_pktq_swq_params *p = &app->swq_params[i];
if (app_swq_get_readers(app, p) == 1)
flags |= RING_F_SC_DEQ;
if (app_swq_get_writers(app, p) == 1)
flags |= RING_F_SP_ENQ;
APP_LOG(app, HIGH, "Initializing %s...", p->name);
app->swq[i] = rte_ring_create(
if (app->swq[i] == NULL)
rte_panic("%s init error\n", p->name);
app_init_tm(struct app_params *app)
for (i = 0; i < app->n_pktq_tm; i++) {
struct app_pktq_tm_params *p_tm = &app->tm_params[i];
struct app_link_params *p_link;
struct rte_eth_link link_eth_params;
struct rte_sched_port *sched;
uint32_t n_subports, subport_id;
p_link = app_get_link_for_tm(app, p_tm);
rte_eth_link_get(p_link->pmd_id, &link_eth_params);
p_tm->sched_port_params.name = p_tm->name;
p_tm->sched_port_params.socket =
app_get_cpu_socket_id(p_link->pmd_id);
p_tm->sched_port_params.rate =
(uint64_t) link_eth_params.link_speed * 1000 * 1000 / 8;
APP_LOG(app, HIGH, "Initializing %s ...", p_tm->name);
sched = rte_sched_port_config(&p_tm->sched_port_params);
rte_panic("%s init error\n", p_tm->name);
n_subports = p_tm->sched_port_params.n_subports_per_port;
for (subport_id = 0; subport_id < n_subports; subport_id++) {
uint32_t n_pipes_per_subport, pipe_id;
status = rte_sched_subport_config(sched,
&p_tm->sched_subport_params[subport_id]);
rte_panic("%s subport %" PRIu32
" init error (%" PRId32 ")\n",
p_tm->name, subport_id, status);
n_pipes_per_subport =
p_tm->sched_port_params.n_pipes_per_subport;
pipe_id < n_pipes_per_subport;
int profile_id = p_tm->sched_pipe_to_profile[
subport_id * APP_MAX_SCHED_PIPES +
if (profile_id == -1)
status = rte_sched_pipe_config(sched,
rte_panic("%s subport %" PRIu32
" (profile %" PRId32 ") "
"init error (%" PRId32 ")\n",
p_tm->name, subport_id, pipe_id,
profile_id, status);

#ifndef RTE_EXEC_ENV_LINUXAPP
app_init_tap(struct app_params *app) {
if (app->n_pktq_tap == 0)
rte_panic("TAP device not supported.\n");
#else

app_init_tap(struct app_params *app)
for (i = 0; i < app->n_pktq_tap; i++) {
struct app_pktq_tap_params *p_tap = &app->tap_params[i];
APP_LOG(app, HIGH, "Initializing %s ...", p_tap->name);
fd = open("/dev/net/tun", O_RDWR | O_NONBLOCK);
rte_panic("Cannot open file /dev/net/tun\n");
memset(&ifr, 0, sizeof(ifr));
ifr.ifr_flags = IFF_TAP | IFF_NO_PI; /* No packet information */
snprintf(ifr.ifr_name, IFNAMSIZ, "%s", p_tap->name);
status = ioctl(fd, TUNSETIFF, (void *) &ifr);
rte_panic("TAP setup error\n");
#ifdef RTE_LIBRTE_KNI
kni_config_network_interface(uint8_t port_id, uint8_t if_up) {
if (port_id >= rte_eth_dev_count())
rte_eth_dev_set_link_up(port_id) :
rte_eth_dev_set_link_down(port_id);

kni_change_mtu(uint8_t port_id, unsigned new_mtu) {
if (port_id >= rte_eth_dev_count())
if (new_mtu > ETHER_MAX_LEN)
ret = rte_eth_dev_set_mtu(port_id, new_mtu);
#endif /* RTE_LIBRTE_KNI */

#ifndef RTE_LIBRTE_KNI
app_init_kni(struct app_params *app) {
if (app->n_pktq_kni == 0)
rte_panic("Cannot init KNI without librte_kni support.\n");
#else

app_init_kni(struct app_params *app) {
if (app->n_pktq_kni == 0)
rte_kni_init(app->n_pktq_kni);
for (i = 0; i < app->n_pktq_kni; i++) {
struct app_pktq_kni_params *p_kni = &app->kni_params[i];
struct app_link_params *p_link;
struct rte_eth_dev_info dev_info;
struct app_mempool_params *mempool_params;
struct rte_mempool *mempool;
struct rte_kni_conf conf;
struct rte_kni_ops ops;
p_link = app_get_link_for_kni(app, p_kni);
memset(&dev_info, 0, sizeof(dev_info));
rte_eth_dev_info_get(p_link->pmd_id, &dev_info);
mempool_params = &app->mempool_params[p_kni->mempool_id];
mempool = app->mempool[p_kni->mempool_id];
memset(&conf, 0, sizeof(conf));
snprintf(conf.name, RTE_KNI_NAMESIZE, "%s", p_kni->name);
conf.force_bind = p_kni->force_bind;
if (conf.force_bind) {
lcore_id = cpu_core_map_get_lcore_id(app->core_map,
p_kni->hyper_th_id);
rte_panic("%s invalid CPU core\n", p_kni->name);
conf.core_id = (uint32_t) lcore_id;
conf.group_id = p_link->pmd_id;
conf.mbuf_size = mempool_params->buffer_size;
conf.addr = dev_info.pci_dev->addr;
conf.id = dev_info.pci_dev->id;
memset(&ops, 0, sizeof(ops));
ops.port_id = (uint8_t) p_link->pmd_id;
ops.change_mtu = kni_change_mtu;
ops.config_network_if = kni_config_network_interface;
APP_LOG(app, HIGH, "Initializing %s ...", p_kni->name);
app->kni[i] = rte_kni_alloc(mempool, &conf, &ops);
rte_panic("%s init error\n", p_kni->name);
#endif /* RTE_LIBRTE_KNI */

app_init_msgq(struct app_params *app)
for (i = 0; i < app->n_msgq; i++) {
struct app_msgq_params *p = &app->msgq_params[i];
APP_LOG(app, HIGH, "Initializing %s ...", p->name);
app->msgq[i] = rte_ring_create(
RING_F_SP_ENQ | RING_F_SC_DEQ);
if (app->msgq[i] == NULL)
rte_panic("%s init error\n", p->name);
void app_pipeline_params_get(struct app_params *app,
struct app_pipeline_params *p_in,
struct pipeline_params *p_out)
snprintf(p_out->name, PIPELINE_NAME_SIZE, "%s", p_in->name);
snprintf(p_out->type, PIPELINE_TYPE_SIZE, "%s", p_in->type);
p_out->socket_id = (int) p_in->socket_id;
p_out->log_level = app->log_level;
p_out->n_ports_in = p_in->n_pktq_in;
for (i = 0; i < p_in->n_pktq_in; i++) {
struct app_pktq_in_params *in = &p_in->pktq_in[i];
struct pipeline_port_in_params *out = &p_out->port_in[i];
case APP_PKTQ_IN_HWQ:
struct app_pktq_hwq_in_params *p_hwq_in =
&app->hwq_in_params[in->id];
struct app_link_params *p_link =
app_get_link_for_rxq(app, p_hwq_in);
uint32_t rxq_link_id, rxq_queue_id;
sscanf(p_hwq_in->name, "RXQ%" SCNu32 ".%" SCNu32,
out->type = PIPELINE_PORT_IN_ETHDEV_READER;
out->params.ethdev.port_id = p_link->pmd_id;
out->params.ethdev.queue_id = rxq_queue_id;
out->burst_size = p_hwq_in->burst;
case APP_PKTQ_IN_SWQ:
struct app_pktq_swq_params *swq_params = &app->swq_params[in->id];
if ((swq_params->ipv4_frag == 0) && (swq_params->ipv6_frag == 0)) {
if (app_swq_get_readers(app, swq_params) == 1) {
out->type = PIPELINE_PORT_IN_RING_READER;
out->params.ring.ring = app->swq[in->id];
out->burst_size = app->swq_params[in->id].burst_read;
out->type = PIPELINE_PORT_IN_RING_MULTI_READER;
out->params.ring_multi.ring = app->swq[in->id];
out->burst_size = swq_params->burst_read;
if (swq_params->ipv4_frag == 1) {
struct rte_port_ring_reader_ipv4_frag_params *params =
&out->params.ring_ipv4_frag;
out->type = PIPELINE_PORT_IN_RING_READER_IPV4_FRAG;
params->ring = app->swq[in->id];
params->mtu = swq_params->mtu;
params->metadata_size = swq_params->metadata_size;
params->pool_direct =
app->mempool[swq_params->mempool_direct_id];
params->pool_indirect =
app->mempool[swq_params->mempool_indirect_id];
out->burst_size = swq_params->burst_read;
struct rte_port_ring_reader_ipv6_frag_params *params =
&out->params.ring_ipv6_frag;
out->type = PIPELINE_PORT_IN_RING_READER_IPV6_FRAG;
params->ring = app->swq[in->id];
params->mtu = swq_params->mtu;
params->metadata_size = swq_params->metadata_size;
params->pool_direct =
app->mempool[swq_params->mempool_direct_id];
params->pool_indirect =
app->mempool[swq_params->mempool_indirect_id];
out->burst_size = swq_params->burst_read;
case APP_PKTQ_IN_TM:
out->type = PIPELINE_PORT_IN_SCHED_READER;
out->params.sched.sched = app->tm[in->id];
out->burst_size = app->tm_params[in->id].burst_read;
#ifdef RTE_EXEC_ENV_LINUXAPP
case APP_PKTQ_IN_TAP:
struct app_pktq_tap_params *tap_params =
&app->tap_params[in->id];
struct app_mempool_params *mempool_params =
&app->mempool_params[tap_params->mempool_id];
struct rte_mempool *mempool =
app->mempool[tap_params->mempool_id];
out->type = PIPELINE_PORT_IN_FD_READER;
out->params.fd.fd = app->tap[in->id];
out->params.fd.mtu = mempool_params->buffer_size;
out->params.fd.mempool = mempool;
out->burst_size = app->tap_params[in->id].burst_read;
#ifdef RTE_LIBRTE_KNI
case APP_PKTQ_IN_KNI:
out->type = PIPELINE_PORT_IN_KNI_READER;
out->params.kni.kni = app->kni[in->id];
out->burst_size = app->kni_params[in->id].burst_read;
#endif /* RTE_LIBRTE_KNI */
case APP_PKTQ_IN_SOURCE:
uint32_t mempool_id =
app->source_params[in->id].mempool_id;
out->type = PIPELINE_PORT_IN_SOURCE;
out->params.source.mempool = app->mempool[mempool_id];
out->burst_size = app->source_params[in->id].burst;
out->params.source.file_name =
app->source_params[in->id].file_name;
out->params.source.n_bytes_per_pkt =
app->source_params[in->id].n_bytes_per_pkt;

p_out->n_ports_out = p_in->n_pktq_out;
for (i = 0; i < p_in->n_pktq_out; i++) {
struct app_pktq_out_params *in = &p_in->pktq_out[i];
struct pipeline_port_out_params *out = &p_out->port_out[i];
case APP_PKTQ_OUT_HWQ:
struct app_pktq_hwq_out_params *p_hwq_out =
&app->hwq_out_params[in->id];
struct app_link_params *p_link =
app_get_link_for_txq(app, p_hwq_out);
uint32_t txq_link_id, txq_queue_id;
sscanf(p_hwq_out->name,
"TXQ%" SCNu32 ".%" SCNu32,
if (p_hwq_out->dropless == 0) {
struct rte_port_ethdev_writer_params *params =
&out->params.ethdev;
out->type = PIPELINE_PORT_OUT_ETHDEV_WRITER;
params->port_id = p_link->pmd_id;
params->queue_id = txq_queue_id;
params->tx_burst_sz =
app->hwq_out_params[in->id].burst;
struct rte_port_ethdev_writer_nodrop_params
*params = &out->params.ethdev_nodrop;
PIPELINE_PORT_OUT_ETHDEV_WRITER_NODROP;
params->port_id = p_link->pmd_id;
params->queue_id = txq_queue_id;
params->tx_burst_sz = p_hwq_out->burst;
params->n_retries = p_hwq_out->n_retries;
case APP_PKTQ_OUT_SWQ:
struct app_pktq_swq_params *swq_params = &app->swq_params[in->id];
if ((swq_params->ipv4_ras == 0) && (swq_params->ipv6_ras == 0)) {
if (app_swq_get_writers(app, swq_params) == 1) {
if (app->swq_params[in->id].dropless == 0) {
struct rte_port_ring_writer_params *params =
out->type = PIPELINE_PORT_OUT_RING_WRITER;
params->ring = app->swq[in->id];
params->tx_burst_sz =
app->swq_params[in->id].burst_write;
struct rte_port_ring_writer_nodrop_params
*params = &out->params.ring_nodrop;
PIPELINE_PORT_OUT_RING_WRITER_NODROP;
params->ring = app->swq[in->id];
params->tx_burst_sz =
app->swq_params[in->id].burst_write;
app->swq_params[in->id].n_retries;
if (swq_params->dropless == 0) {
struct rte_port_ring_multi_writer_params *params =
&out->params.ring_multi;
out->type = PIPELINE_PORT_OUT_RING_MULTI_WRITER;
params->ring = app->swq[in->id];
params->tx_burst_sz = swq_params->burst_write;
struct rte_port_ring_multi_writer_nodrop_params
*params = &out->params.ring_multi_nodrop;
out->type = PIPELINE_PORT_OUT_RING_MULTI_WRITER_NODROP;
params->ring = app->swq[in->id];
params->tx_burst_sz = swq_params->burst_write;
params->n_retries = swq_params->n_retries;
if (swq_params->ipv4_ras == 1) {
struct rte_port_ring_writer_ipv4_ras_params *params =
&out->params.ring_ipv4_ras;
out->type = PIPELINE_PORT_OUT_RING_WRITER_IPV4_RAS;
params->ring = app->swq[in->id];
params->tx_burst_sz = swq_params->burst_write;
struct rte_port_ring_writer_ipv6_ras_params *params =
&out->params.ring_ipv6_ras;
out->type = PIPELINE_PORT_OUT_RING_WRITER_IPV6_RAS;
params->ring = app->swq[in->id];
params->tx_burst_sz = swq_params->burst_write;
case APP_PKTQ_OUT_TM:
struct rte_port_sched_writer_params *params =
out->type = PIPELINE_PORT_OUT_SCHED_WRITER;
params->sched = app->tm[in->id];
params->tx_burst_sz =
app->tm_params[in->id].burst_write;
#ifdef RTE_EXEC_ENV_LINUXAPP
case APP_PKTQ_OUT_TAP:
struct rte_port_fd_writer_params *params =
out->type = PIPELINE_PORT_OUT_FD_WRITER;
params->fd = app->tap[in->id];
params->tx_burst_sz =
app->tap_params[in->id].burst_write;
#ifdef RTE_LIBRTE_KNI
case APP_PKTQ_OUT_KNI:
struct app_pktq_kni_params *p_kni =
&app->kni_params[in->id];
if (p_kni->dropless == 0) {
struct rte_port_kni_writer_params *params =
out->type = PIPELINE_PORT_OUT_KNI_WRITER;
params->kni = app->kni[in->id];
params->tx_burst_sz =
app->kni_params[in->id].burst_write;
struct rte_port_kni_writer_nodrop_params
*params = &out->params.kni_nodrop;
out->type = PIPELINE_PORT_OUT_KNI_WRITER_NODROP;
params->kni = app->kni[in->id];
params->tx_burst_sz =
app->kni_params[in->id].burst_write;
app->kni_params[in->id].n_retries;
#endif /* RTE_LIBRTE_KNI */
case APP_PKTQ_OUT_SINK:
out->type = PIPELINE_PORT_OUT_SINK;
out->params.sink.file_name =
app->sink_params[in->id].file_name;
out->params.sink.max_n_pkts =
app->sink_params[in->id].

p_out->n_msgq = p_in->n_msgq_in;
for (i = 0; i < p_in->n_msgq_in; i++)
p_out->msgq_in[i] = app->msgq[p_in->msgq_in[i]];
for (i = 0; i < p_in->n_msgq_out; i++)
p_out->msgq_out[i] = app->msgq[p_in->msgq_out[i]];

p_out->n_args = p_in->n_args;
for (i = 0; i < p_in->n_args; i++) {
p_out->args_name[i] = p_in->args_name[i];
p_out->args_value[i] = p_in->args_value[i];
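
/*
 * Pipeline instantiation: look up the registered pipeline type by name, build
 * the pipeline_params with app_pipeline_params_get() and call the type's
 * back-end and front-end f_init hooks; the per-pipeline timer period is
 * converted into TSC cycles (the configured value appears to be expressed in
 * hundredths of a second).
 */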
app_init_pipelines(struct app_params *app)
for (p_id = 0; p_id < app->n_pipelines; p_id++) {
struct app_pipeline_params *params =
&app->pipeline_params[p_id];
struct app_pipeline_data *data = &app->pipeline_data[p_id];
struct pipeline_type *ptype;
struct pipeline_params pp;
APP_LOG(app, HIGH, "Initializing %s ...", params->name);
ptype = app_pipeline_type_find(app, params->type);
rte_panic("Init error: Unknown pipeline type \"%s\"\n",
app_pipeline_params_get(app, params, &pp);
if (ptype->be_ops->f_init) {
data->be = ptype->be_ops->f_init(&pp, (void *) app);
if (data->be == NULL)
rte_panic("Pipeline instance \"%s\" back-end "
"init error\n", params->name);
if (ptype->fe_ops->f_init) {
data->fe = ptype->fe_ops->f_init(&pp, (void *) app);
if (data->fe == NULL)
rte_panic("Pipeline instance \"%s\" front-end "
"init error\n", params->name);
data->ptype = ptype;
data->timer_period = (rte_get_tsc_hz() *
params->timer_period) / 100;

app_post_init_pipelines(struct app_params *app)
for (p_id = 0; p_id < app->n_pipelines; p_id++) {
struct app_pipeline_params *params =
&app->pipeline_params[p_id];
struct app_pipeline_data *data = &app->pipeline_data[p_id];
if (data->ptype->fe_ops->f_post_init == NULL)
status = data->ptype->fe_ops->f_post_init(data->fe);
rte_panic("Pipeline instance \"%s\" front-end "
"post-init error\n", params->name);
app_init_threads(struct app_params *app)
uint64_t time = rte_get_tsc_cycles();
for (p_id = 0; p_id < app->n_pipelines; p_id++) {
struct app_pipeline_params *params =
&app->pipeline_params[p_id];
struct app_pipeline_data *data = &app->pipeline_data[p_id];
struct pipeline_type *ptype;
struct app_thread_data *t;
struct app_thread_pipeline_data *p;
lcore_id = cpu_core_map_get_lcore_id(app->core_map,
params->hyper_th_id);
rte_panic("Invalid core s%" PRIu32 "c%" PRIu32 "%s\n",
(params->hyper_th_id) ? "h" : "");
t = &app->thread_data[lcore_id];
t->timer_period = (rte_get_tsc_hz() * APP_THREAD_TIMER_PERIOD) / 1000;
t->thread_req_deadline = time + t->timer_period;
t->headroom_cycles = 0;
t->headroom_time = rte_get_tsc_cycles();
t->headroom_ratio = 0.0;
t->msgq_in = app_thread_msgq_in_get(app,
params->hyper_th_id);
if (t->msgq_in == NULL)
rte_panic("Init error: Cannot find MSGQ_IN for thread %" PRId32,
t->msgq_out = app_thread_msgq_out_get(app,
params->hyper_th_id);
if (t->msgq_out == NULL)
rte_panic("Init error: Cannot find MSGQ_OUT for thread %" PRId32,
ptype = app_pipeline_type_find(app, params->type);
rte_panic("Init error: Unknown pipeline "
"type \"%s\"\n", params->type);
p = (ptype->be_ops->f_run == NULL) ?
&t->regular[t->n_regular] :
&t->custom[t->n_custom];
p->pipeline_id = p_id;
p->f_run = ptype->be_ops->f_run;
p->f_timer = ptype->be_ops->f_timer;
p->timer_period = data->timer_period;
p->deadline = time + data->timer_period;
if (ptype->be_ops->f_run == NULL)
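
/*
 * Top-level initialization: the helpers above are invoked in order (core map
 * and core mask, EAL, mempools, links and the various packet/message queue
 * types), the built-in pipeline types and their CLI commands are registered,
 * and finally the pipelines are instantiated and assigned to threads.
 */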
int app_init(struct app_params *app)
app_init_core_map(app);
app_init_core_mask(app);
app_init_mempool(app);
app_pipeline_common_cmd_push(app);
app_pipeline_thread_cmd_push(app);
app_pipeline_type_register(app, &pipeline_master);
app_pipeline_type_register(app, &pipeline_passthrough);
app_pipeline_type_register(app, &pipeline_flow_classification);
app_pipeline_type_register(app, &pipeline_flow_actions);
app_pipeline_type_register(app, &pipeline_firewall);
app_pipeline_type_register(app, &pipeline_routing);
app_init_pipelines(app);
app_init_threads(app);

int app_post_init(struct app_params *app)
app_post_init_pipelines(app);

app_pipeline_type_cmd_push(struct app_params *app,
struct pipeline_type *ptype)
cmdline_parse_ctx_t *cmds;
/* Check input arguments */
if ((app == NULL) ||
n_cmds = pipeline_type_cmds_count(ptype);
cmds = ptype->fe_ops->cmds;
/* Check for available slots in the application commands array */
if (n_cmds > APP_MAX_CMDS - app->n_cmds)
/* Push pipeline commands into the application */
memcpy(&app->cmds[app->n_cmds],
n_cmds * sizeof(cmdline_parse_ctx_t));
for (i = 0; i < n_cmds; i++)
app->cmds[app->n_cmds + i]->data = app;
app->n_cmds += n_cmds;
app->cmds[app->n_cmds] = NULL;

app_pipeline_type_register(struct app_params *app, struct pipeline_type *ptype)
/* Check input arguments */
if ((app == NULL) ||
(ptype->name == NULL) ||
(strlen(ptype->name) == 0) ||
(ptype->be_ops->f_init == NULL) ||
(ptype->be_ops->f_timer == NULL))
/* Check for duplicate entry */
for (i = 0; i < app->n_pipeline_types; i++)
if (strcmp(app->pipeline_type[i].name, ptype->name) == 0)
/* Check for resource availability */
n_cmds = pipeline_type_cmds_count(ptype);
if ((app->n_pipeline_types == APP_MAX_PIPELINE_TYPES) ||
(n_cmds > APP_MAX_CMDS - app->n_cmds))
/* Copy pipeline type */
memcpy(&app->pipeline_type[app->n_pipeline_types++],
sizeof(struct pipeline_type));
/* Copy CLI commands */
app_pipeline_type_cmd_push(app, ptype);

struct pipeline_type *app_pipeline_type_find(struct app_params *app, char *name)
for (i = 0; i < app->n_pipeline_types; i++)
if (strcmp(app->pipeline_type[i].name, name) == 0)
return &app->pipeline_type[i];