4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 #include <netinet/in.h>
38 #ifdef RTE_EXEC_ENV_LINUXAPP
40 #include <linux/if_tun.h>
43 #include <sys/ioctl.h>
46 #include <rte_cycles.h>
47 #include <rte_ethdev.h>
48 #include <rte_ether.h>
51 #include <rte_malloc.h>
52 #include <rte_bus_pci.h>
56 #include "pipeline_common_fe.h"
57 #include "pipeline_master.h"
58 #include "pipeline_passthrough.h"
59 #include "pipeline_firewall.h"
60 #include "pipeline_flow_classification.h"
61 #include "pipeline_flow_actions.h"
62 #include "pipeline_routing.h"
63 #include "thread_fe.h"
65 #define APP_NAME_SIZE 32
67 #define APP_RETA_SIZE_MAX (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
/*
 * app_init_core_map(): build the application's CPU core map via
 * cpu_core_map_init() and panic if it cannot be created; dump the map
 * when the configured log level is verbose enough.
 * NOTE(review): extraction-damaged fragment — interior lines (return
 * type, braces, trailing arguments) are missing and each surviving line
 * is prefixed with its original file line number. Do not compile as-is.
 */
70 app_init_core_map(struct app_params *app)
72 APP_LOG(app, HIGH, "Initializing CPU core map ...");
73 app->core_map = cpu_core_map_init(RTE_MAX_NUMA_NODES, RTE_MAX_LCORE,
76 if (app->core_map == NULL)
77 rte_panic("Cannot create CPU core map\n");
79 if (app->log_level >= APP_LOG_LEVEL_LOW)
80 cpu_core_map_print(app->core_map);
/*
 * APP_CORE_MASK_STRING_SIZE: hex-string buffer size for the core mask
 * (2 hex chars per byte of the mask, plus the NUL terminator).
 *
 * app_init_core_mask(): for every pipeline, resolve its lcore via the
 * core map (panicking on failure) and enable that lcore in the
 * application core mask; finally log the mask as a hex string.
 * NOTE(review): extraction-damaged fragment — interior lines are
 * missing and each surviving line is prefixed with its original file
 * line number. Do not compile as-is.
 */
83 /* Core Mask String in Hex Representation */
84 #define APP_CORE_MASK_STRING_SIZE ((64 * APP_CORE_MASK_SIZE) / 8 * 2 + 1)
87 app_init_core_mask(struct app_params *app)
90 char core_mask_str[APP_CORE_MASK_STRING_SIZE];
92 for (i = 0; i < app->n_pipelines; i++) {
93 struct app_pipeline_params *p = &app->pipeline_params[i];
96 lcore_id = cpu_core_map_get_lcore_id(app->core_map,
102 rte_panic("Cannot create CPU core mask\n");
104 app_core_enable_in_core_mask(app, lcore_id);
107 app_core_build_core_mask_string(app, core_mask_str);
108 APP_LOG(app, HIGH, "CPU core mask = 0x%s", core_mask_str);
/*
 * app_init_eal(): translate the parsed app_eal_params into a synthetic
 * argv (strdup'd strings such as "-c<mask>", "-n<channels>",
 * "--pci-whitelist=<bdf>", "--no-huge", ...), optionally echo the
 * argument list, then call rte_eal_init() and panic on failure.
 * NOTE(review): extraction-damaged fragment — many interior lines
 * (buffer declaration, closing braces, several snprintf bodies) are
 * missing and each surviving line is prefixed with its original file
 * line number. Do not compile as-is. Presumably each strdup() result
 * is never freed (argv lives for the process lifetime) — TODO confirm
 * against the full source.
 */
112 app_init_eal(struct app_params *app)
115 char core_mask_str[APP_CORE_MASK_STRING_SIZE];
116 struct app_eal_params *p = &app->eal_params;
121 app->eal_argv[n_args++] = strdup(app->app_name);
123 app_core_build_core_mask_string(app, core_mask_str);
124 snprintf(buffer, sizeof(buffer), "-c%s", core_mask_str);
125 app->eal_argv[n_args++] = strdup(buffer);
128 snprintf(buffer, sizeof(buffer), "--lcores=%s", p->coremap);
129 app->eal_argv[n_args++] = strdup(buffer);
132 if (p->master_lcore_present) {
135 "--master-lcore=%" PRIu32,
137 app->eal_argv[n_args++] = strdup(buffer);
140 snprintf(buffer, sizeof(buffer), "-n%" PRIu32, p->channels);
141 app->eal_argv[n_args++] = strdup(buffer);
143 if (p->memory_present) {
144 snprintf(buffer, sizeof(buffer), "-m%" PRIu32, p->memory);
145 app->eal_argv[n_args++] = strdup(buffer);
148 if (p->ranks_present) {
149 snprintf(buffer, sizeof(buffer), "-r%" PRIu32, p->ranks);
150 app->eal_argv[n_args++] = strdup(buffer);
153 for (i = 0; i < APP_MAX_LINKS; i++) {
154 if (p->pci_blacklist[i] == NULL)
159 "--pci-blacklist=%s",
160 p->pci_blacklist[i]);
161 app->eal_argv[n_args++] = strdup(buffer);
164 if (app->port_mask != 0)
165 for (i = 0; i < APP_MAX_LINKS; i++) {
166 if (p->pci_whitelist[i] == NULL)
171 "--pci-whitelist=%s",
172 p->pci_whitelist[i]);
173 app->eal_argv[n_args++] = strdup(buffer);
176 for (i = 0; i < app->n_links; i++) {
177 char *pci_bdf = app->link_params[i].pci_bdf;
181 "--pci-whitelist=%s",
183 app->eal_argv[n_args++] = strdup(buffer);
186 for (i = 0; i < APP_MAX_LINKS; i++) {
187 if (p->vdev[i] == NULL)
194 app->eal_argv[n_args++] = strdup(buffer);
197 if ((p->vmware_tsc_map_present) && p->vmware_tsc_map) {
198 snprintf(buffer, sizeof(buffer), "--vmware-tsc-map");
199 app->eal_argv[n_args++] = strdup(buffer);
207 app->eal_argv[n_args++] = strdup(buffer);
211 snprintf(buffer, sizeof(buffer), "--syslog=%s", p->syslog);
212 app->eal_argv[n_args++] = strdup(buffer);
215 if (p->log_level_present) {
218 "--log-level=%" PRIu32,
220 app->eal_argv[n_args++] = strdup(buffer);
223 if ((p->version_present) && p->version) {
224 snprintf(buffer, sizeof(buffer), "-v");
225 app->eal_argv[n_args++] = strdup(buffer);
228 if ((p->help_present) && p->help) {
229 snprintf(buffer, sizeof(buffer), "--help");
230 app->eal_argv[n_args++] = strdup(buffer);
233 if ((p->no_huge_present) && p->no_huge) {
234 snprintf(buffer, sizeof(buffer), "--no-huge");
235 app->eal_argv[n_args++] = strdup(buffer);
238 if ((p->no_pci_present) && p->no_pci) {
239 snprintf(buffer, sizeof(buffer), "--no-pci");
240 app->eal_argv[n_args++] = strdup(buffer);
243 if ((p->no_hpet_present) && p->no_hpet) {
244 snprintf(buffer, sizeof(buffer), "--no-hpet");
245 app->eal_argv[n_args++] = strdup(buffer);
248 if ((p->no_shconf_present) && p->no_shconf) {
249 snprintf(buffer, sizeof(buffer), "--no-shconf");
250 app->eal_argv[n_args++] = strdup(buffer);
254 snprintf(buffer, sizeof(buffer), "-d%s", p->add_driver);
255 app->eal_argv[n_args++] = strdup(buffer);
263 app->eal_argv[n_args++] = strdup(buffer);
267 snprintf(buffer, sizeof(buffer), "--huge-dir=%s", p->huge_dir);
268 app->eal_argv[n_args++] = strdup(buffer);
271 if (p->file_prefix) {
276 app->eal_argv[n_args++] = strdup(buffer);
279 if (p->base_virtaddr) {
282 "--base-virtaddr=%s",
284 app->eal_argv[n_args++] = strdup(buffer);
287 if ((p->create_uio_dev_present) && p->create_uio_dev) {
288 snprintf(buffer, sizeof(buffer), "--create-uio-dev");
289 app->eal_argv[n_args++] = strdup(buffer);
297 app->eal_argv[n_args++] = strdup(buffer);
/* Terminate the EAL argument list with "--" before handing off. */
300 snprintf(buffer, sizeof(buffer), "--");
301 app->eal_argv[n_args++] = strdup(buffer);
303 app->eal_argc = n_args;
305 APP_LOG(app, HIGH, "Initializing EAL ...");
306 if (app->log_level >= APP_LOG_LEVEL_LOW) {
309 fprintf(stdout, "[APP] EAL arguments: \"");
310 for (i = 1; i < app->eal_argc; i++)
311 fprintf(stdout, "%s ", app->eal_argv[i]);
312 fprintf(stdout, "\"\n");
315 status = rte_eal_init(app->eal_argc, app->eal_argv);
317 rte_panic("EAL init error\n");
/*
 * app_init_mempool(): create one rte_pktmbuf pool per configured
 * mempool entry, panicking on allocation failure.
 * NOTE(review): extraction-damaged fragment — interior lines (pool
 * size/cache-size arguments) are missing and each surviving line is
 * prefixed with its original file line number. Do not compile as-is.
 */
321 app_init_mempool(struct app_params *app)
325 for (i = 0; i < app->n_mempools; i++) {
326 struct app_mempool_params *p = &app->mempool_params[i];
328 APP_LOG(app, HIGH, "Initializing %s ...", p->name);
329 app->mempool[i] = rte_pktmbuf_pool_create(
335 sizeof(struct rte_mbuf), /* mbuf data size */
338 if (app->mempool[i] == NULL)
339 rte_panic("%s init error\n", p->name);
/*
 * app_link_filter_arp_add(): install an ethertype filter steering ARP
 * frames to the link's dedicated arp_q RX queue via
 * rte_eth_dev_filter_ctrl().
 * NOTE(review): extraction-damaged fragment; original line numbers are
 * fused into the text. Do not compile as-is.
 */
344 app_link_filter_arp_add(struct app_link_params *link)
346 struct rte_eth_ethertype_filter filter = {
347 .ether_type = ETHER_TYPE_ARP,
349 .queue = link->arp_q,
352 return rte_eth_dev_filter_ctrl(link->pmd_id,
353 RTE_ETH_FILTER_ETHERTYPE,
/*
 * app_link_filter_tcp_syn_add(): install a SYN filter steering TCP SYN
 * packets to the link's tcp_syn_q RX queue.
 * NOTE(review): extraction-damaged fragment; original line numbers are
 * fused into the text. Do not compile as-is.
 */
359 app_link_filter_tcp_syn_add(struct app_link_params *link)
361 struct rte_eth_syn_filter filter = {
363 .queue = link->tcp_syn_q,
366 return rte_eth_dev_filter_ctrl(link->pmd_id,
/*
 * app_link_filter_ip_add(): add a 5-tuple filter on port l1 matching
 * only the destination IP of link l2 (all other fields masked off),
 * steering matches to l1->ip_local_q at the lowest priority.
 * NOTE(review): extraction-damaged fragment; original line numbers are
 * fused into the text. Do not compile as-is.
 */
373 app_link_filter_ip_add(struct app_link_params *l1, struct app_link_params *l2)
375 struct rte_eth_ntuple_filter filter = {
376 .flags = RTE_5TUPLE_FLAGS,
377 .dst_ip = rte_bswap32(l2->ip),
378 .dst_ip_mask = UINT32_MAX, /* Enable */
380 .src_ip_mask = 0, /* Disable */
382 .dst_port_mask = 0, /* Disable */
384 .src_port_mask = 0, /* Disable */
386 .proto_mask = 0, /* Disable */
388 .priority = 1, /* Lowest */
389 .queue = l1->ip_local_q,
392 return rte_eth_dev_filter_ctrl(l1->pmd_id,
393 RTE_ETH_FILTER_NTUPLE,
/*
 * app_link_filter_ip_del(): remove the 5-tuple IP filter previously
 * added by app_link_filter_ip_add() (same tuple, RTE_ETH_FILTER_DELETE).
 * NOTE(review): extraction-damaged fragment; original line numbers are
 * fused into the text. Do not compile as-is.
 */
399 app_link_filter_ip_del(struct app_link_params *l1, struct app_link_params *l2)
401 struct rte_eth_ntuple_filter filter = {
402 .flags = RTE_5TUPLE_FLAGS,
403 .dst_ip = rte_bswap32(l2->ip),
404 .dst_ip_mask = UINT32_MAX, /* Enable */
406 .src_ip_mask = 0, /* Disable */
408 .dst_port_mask = 0, /* Disable */
410 .src_port_mask = 0, /* Disable */
412 .proto_mask = 0, /* Disable */
414 .priority = 1, /* Lowest */
415 .queue = l1->ip_local_q,
418 return rte_eth_dev_filter_ctrl(l1->pmd_id,
419 RTE_ETH_FILTER_NTUPLE,
420 RTE_ETH_FILTER_DELETE,
/*
 * app_link_filter_tcp_add(): add a 5-tuple filter matching TCP traffic
 * to l2's IP, steering it to l1->tcp_local_q at priority 2 (above the
 * plain IP filter).
 * NOTE(review): extraction-damaged fragment; original line numbers are
 * fused into the text. Do not compile as-is.
 */
425 app_link_filter_tcp_add(struct app_link_params *l1, struct app_link_params *l2)
427 struct rte_eth_ntuple_filter filter = {
428 .flags = RTE_5TUPLE_FLAGS,
429 .dst_ip = rte_bswap32(l2->ip),
430 .dst_ip_mask = UINT32_MAX, /* Enable */
432 .src_ip_mask = 0, /* Disable */
434 .dst_port_mask = 0, /* Disable */
436 .src_port_mask = 0, /* Disable */
437 .proto = IPPROTO_TCP,
438 .proto_mask = UINT8_MAX, /* Enable */
440 .priority = 2, /* Higher priority than IP */
441 .queue = l1->tcp_local_q,
444 return rte_eth_dev_filter_ctrl(l1->pmd_id,
445 RTE_ETH_FILTER_NTUPLE,
/*
 * app_link_filter_tcp_del(): remove the TCP 5-tuple filter added by
 * app_link_filter_tcp_add() (same tuple, RTE_ETH_FILTER_DELETE).
 * NOTE(review): extraction-damaged fragment; original line numbers are
 * fused into the text. Do not compile as-is.
 */
451 app_link_filter_tcp_del(struct app_link_params *l1, struct app_link_params *l2)
453 struct rte_eth_ntuple_filter filter = {
454 .flags = RTE_5TUPLE_FLAGS,
455 .dst_ip = rte_bswap32(l2->ip),
456 .dst_ip_mask = UINT32_MAX, /* Enable */
458 .src_ip_mask = 0, /* Disable */
460 .dst_port_mask = 0, /* Disable */
462 .src_port_mask = 0, /* Disable */
463 .proto = IPPROTO_TCP,
464 .proto_mask = UINT8_MAX, /* Enable */
466 .priority = 2, /* Higher priority than IP */
467 .queue = l1->tcp_local_q,
470 return rte_eth_dev_filter_ctrl(l1->pmd_id,
471 RTE_ETH_FILTER_NTUPLE,
472 RTE_ETH_FILTER_DELETE,
/*
 * app_link_filter_udp_add(): add a 5-tuple filter matching UDP traffic
 * to l2's IP, steering it to l1->udp_local_q at priority 2.
 * NOTE(review): extraction-damaged fragment; original line numbers are
 * fused into the text. Do not compile as-is.
 */
477 app_link_filter_udp_add(struct app_link_params *l1, struct app_link_params *l2)
479 struct rte_eth_ntuple_filter filter = {
480 .flags = RTE_5TUPLE_FLAGS,
481 .dst_ip = rte_bswap32(l2->ip),
482 .dst_ip_mask = UINT32_MAX, /* Enable */
484 .src_ip_mask = 0, /* Disable */
486 .dst_port_mask = 0, /* Disable */
488 .src_port_mask = 0, /* Disable */
489 .proto = IPPROTO_UDP,
490 .proto_mask = UINT8_MAX, /* Enable */
492 .priority = 2, /* Higher priority than IP */
493 .queue = l1->udp_local_q,
496 return rte_eth_dev_filter_ctrl(l1->pmd_id,
497 RTE_ETH_FILTER_NTUPLE,
/*
 * app_link_filter_udp_del(): remove the UDP 5-tuple filter added by
 * app_link_filter_udp_add() (same tuple, RTE_ETH_FILTER_DELETE).
 * NOTE(review): extraction-damaged fragment; original line numbers are
 * fused into the text. Do not compile as-is.
 */
503 app_link_filter_udp_del(struct app_link_params *l1, struct app_link_params *l2)
505 struct rte_eth_ntuple_filter filter = {
506 .flags = RTE_5TUPLE_FLAGS,
507 .dst_ip = rte_bswap32(l2->ip),
508 .dst_ip_mask = UINT32_MAX, /* Enable */
510 .src_ip_mask = 0, /* Disable */
512 .dst_port_mask = 0, /* Disable */
514 .src_port_mask = 0, /* Disable */
515 .proto = IPPROTO_UDP,
516 .proto_mask = UINT8_MAX, /* Enable */
518 .priority = 2, /* Higher priority than IP */
519 .queue = l1->udp_local_q,
522 return rte_eth_dev_filter_ctrl(l1->pmd_id,
523 RTE_ETH_FILTER_NTUPLE,
524 RTE_ETH_FILTER_DELETE,
/*
 * app_link_filter_sctp_add(): add a 5-tuple filter matching SCTP
 * traffic to l2's IP, steering it to l1->sctp_local_q at priority 2.
 * NOTE(review): extraction-damaged fragment; original line numbers are
 * fused into the text. Do not compile as-is.
 */
529 app_link_filter_sctp_add(struct app_link_params *l1, struct app_link_params *l2)
531 struct rte_eth_ntuple_filter filter = {
532 .flags = RTE_5TUPLE_FLAGS,
533 .dst_ip = rte_bswap32(l2->ip),
534 .dst_ip_mask = UINT32_MAX, /* Enable */
536 .src_ip_mask = 0, /* Disable */
538 .dst_port_mask = 0, /* Disable */
540 .src_port_mask = 0, /* Disable */
541 .proto = IPPROTO_SCTP,
542 .proto_mask = UINT8_MAX, /* Enable */
544 .priority = 2, /* Higher priority than IP */
545 .queue = l1->sctp_local_q,
548 return rte_eth_dev_filter_ctrl(l1->pmd_id,
549 RTE_ETH_FILTER_NTUPLE,
/*
 * app_link_filter_sctp_del(): remove the SCTP 5-tuple filter added by
 * app_link_filter_sctp_add() (same tuple, RTE_ETH_FILTER_DELETE).
 * NOTE(review): extraction-damaged fragment; original line numbers are
 * fused into the text. Do not compile as-is.
 */
555 app_link_filter_sctp_del(struct app_link_params *l1, struct app_link_params *l2)
557 struct rte_eth_ntuple_filter filter = {
558 .flags = RTE_5TUPLE_FLAGS,
559 .dst_ip = rte_bswap32(l2->ip),
560 .dst_ip_mask = UINT32_MAX, /* Enable */
562 .src_ip_mask = 0, /* Disable */
564 .dst_port_mask = 0, /* Disable */
566 .src_port_mask = 0, /* Disable */
567 .proto = IPPROTO_SCTP,
568 .proto_mask = UINT8_MAX, /* Enable */
570 .priority = 2, /* Higher priority than IP */
571 .queue = l1->sctp_local_q,
574 return rte_eth_dev_filter_ctrl(l1->pmd_id,
575 RTE_ETH_FILTER_NTUPLE,
576 RTE_ETH_FILTER_DELETE,
/*
 * app_link_set_arp_filter(): when the link has a dedicated ARP queue
 * (arp_q != 0), install the ARP ethertype filter, log it, and panic if
 * the driver rejects it.
 * NOTE(review): extraction-damaged fragment; original line numbers are
 * fused into the text. Do not compile as-is.
 */
581 app_link_set_arp_filter(struct app_params *app, struct app_link_params *cp)
583 if (cp->arp_q != 0) {
584 int status = app_link_filter_arp_add(cp);
586 APP_LOG(app, LOW, "%s (%" PRIu32 "): "
587 "Adding ARP filter (queue = %" PRIu32 ")",
588 cp->name, cp->pmd_id, cp->arp_q);
591 rte_panic("%s (%" PRIu32 "): "
592 "Error adding ARP filter "
593 "(queue = %" PRIu32 ") (%" PRId32 ")\n",
594 cp->name, cp->pmd_id, cp->arp_q, status);
/*
 * app_link_set_tcp_syn_filter(): when the link has a dedicated TCP SYN
 * queue (tcp_syn_q != 0), install the SYN filter, log it, and panic if
 * the driver rejects it.
 * NOTE(review): extraction-damaged fragment; original line numbers are
 * fused into the text. Do not compile as-is.
 */
599 app_link_set_tcp_syn_filter(struct app_params *app, struct app_link_params *cp)
601 if (cp->tcp_syn_q != 0) {
602 int status = app_link_filter_tcp_syn_add(cp);
604 APP_LOG(app, LOW, "%s (%" PRIu32 "): "
605 "Adding TCP SYN filter (queue = %" PRIu32 ")",
606 cp->name, cp->pmd_id, cp->tcp_syn_q);
609 rte_panic("%s (%" PRIu32 "): "
610 "Error adding TCP SYN filter "
611 "(queue = %" PRIu32 ") (%" PRId32 ")\n",
612 cp->name, cp->pmd_id, cp->tcp_syn_q,
/*
 * app_link_up_internal(): bring link cp up — for every configured link
 * that has a local IP/TCP/UDP/SCTP queue, install the corresponding
 * 5-tuple filter for cp's IP (panicking on any filter error), then call
 * rte_eth_dev_set_link_up() (tolerating -ENOTSUP) and mark the link UP.
 * NOTE(review): extraction-damaged fragment — many interior lines
 * (status checks, closing braces) are missing and each surviving line
 * is prefixed with its original file line number. Do not compile as-is.
 */
618 app_link_up_internal(struct app_params *app, struct app_link_params *cp)
623 /* For each link, add filters for IP of current link */
625 for (i = 0; i < app->n_links; i++) {
626 struct app_link_params *p = &app->link_params[i];
629 if (p->ip_local_q != 0) {
630 int status = app_link_filter_ip_add(p, cp);
632 APP_LOG(app, LOW, "%s (%" PRIu32 "): "
633 "Adding IP filter (queue= %" PRIu32
634 ", IP = 0x%08" PRIx32 ")",
635 p->name, p->pmd_id, p->ip_local_q,
639 rte_panic("%s (%" PRIu32 "): "
641 "filter (queue= %" PRIu32 ", "
645 p->ip_local_q, cp->ip, status);
649 if (p->tcp_local_q != 0) {
650 int status = app_link_filter_tcp_add(p, cp);
652 APP_LOG(app, LOW, "%s (%" PRIu32 "): "
655 ", IP = 0x%08" PRIx32 ")",
656 p->name, p->pmd_id, p->tcp_local_q,
660 rte_panic("%s (%" PRIu32 "): "
662 "filter (queue = %" PRIu32 ", "
666 p->tcp_local_q, cp->ip, status);
670 if (p->udp_local_q != 0) {
671 int status = app_link_filter_udp_add(p, cp);
673 APP_LOG(app, LOW, "%s (%" PRIu32 "): "
676 ", IP = 0x%08" PRIx32 ")",
677 p->name, p->pmd_id, p->udp_local_q,
681 rte_panic("%s (%" PRIu32 "): "
683 "filter (queue = %" PRIu32 ", "
687 p->udp_local_q, cp->ip, status);
691 if (p->sctp_local_q != 0) {
692 int status = app_link_filter_sctp_add(p, cp);
694 APP_LOG(app, LOW, "%s (%" PRIu32
695 "): Adding SCTP filter "
697 ", IP = 0x%08" PRIx32 ")",
698 p->name, p->pmd_id, p->sctp_local_q,
702 rte_panic("%s (%" PRIu32 "): "
704 "filter (queue = %" PRIu32 ", "
708 p->sctp_local_q, cp->ip,
715 status = rte_eth_dev_set_link_up(cp->pmd_id);
716 /* Do not panic if PMD does not provide link up functionality */
717 if (status < 0 && status != -ENOTSUP)
718 rte_panic("%s (%" PRIu32 "): PMD set link up error %"
719 PRId32 "\n", cp->name, cp->pmd_id, status);
721 /* Mark link as UP */
/*
 * app_link_down_internal(): mirror of app_link_up_internal() — bring
 * the port down via rte_eth_dev_set_link_down() (tolerating -ENOTSUP),
 * mark the link DOWN, then delete the per-protocol 5-tuple filters for
 * cp's IP from every link, panicking on any deletion error.
 * NOTE(review): extraction-damaged fragment — interior lines (status
 * checks, closing braces, the early return for an invalid IP) are
 * missing and each surviving line is prefixed with its original file
 * line number. Do not compile as-is.
 */
726 app_link_down_internal(struct app_params *app, struct app_link_params *cp)
732 status = rte_eth_dev_set_link_down(cp->pmd_id);
733 /* Do not panic if PMD does not provide link down functionality */
734 if (status < 0 && status != -ENOTSUP)
735 rte_panic("%s (%" PRIu32 "): PMD set link down error %"
736 PRId32 "\n", cp->name, cp->pmd_id, status);
738 /* Mark link as DOWN */
741 /* Return if current link IP is not valid */
745 /* For each link, remove filters for IP of current link */
746 for (i = 0; i < app->n_links; i++) {
747 struct app_link_params *p = &app->link_params[i];
750 if (p->ip_local_q != 0) {
751 int status = app_link_filter_ip_del(p, cp);
753 APP_LOG(app, LOW, "%s (%" PRIu32
754 "): Deleting IP filter "
755 "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")",
756 p->name, p->pmd_id, p->ip_local_q, cp->ip);
759 rte_panic("%s (%" PRIu32
760 "): Error deleting IP filter "
764 p->name, p->pmd_id, p->ip_local_q,
769 if (p->tcp_local_q != 0) {
770 int status = app_link_filter_tcp_del(p, cp);
772 APP_LOG(app, LOW, "%s (%" PRIu32
773 "): Deleting TCP filter "
775 ", IP = 0x%" PRIx32 ")",
776 p->name, p->pmd_id, p->tcp_local_q, cp->ip);
779 rte_panic("%s (%" PRIu32
780 "): Error deleting TCP filter "
784 p->name, p->pmd_id, p->tcp_local_q,
789 if (p->udp_local_q != 0) {
790 int status = app_link_filter_udp_del(p, cp);
792 APP_LOG(app, LOW, "%s (%" PRIu32
793 "): Deleting UDP filter "
794 "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")",
795 p->name, p->pmd_id, p->udp_local_q, cp->ip);
798 rte_panic("%s (%" PRIu32
799 "): Error deleting UDP filter "
803 p->name, p->pmd_id, p->udp_local_q,
808 if (p->sctp_local_q != 0) {
809 int status = app_link_filter_sctp_del(p, cp);
811 APP_LOG(app, LOW, "%s (%" PRIu32
812 "): Deleting SCTP filter "
814 ", IP = 0x%" PRIx32 ")",
815 p->name, p->pmd_id, p->sctp_local_q, cp->ip);
818 rte_panic("%s (%" PRIu32
819 "): Error deleting SCTP filter "
823 p->name, p->pmd_id, p->sctp_local_q,
/*
 * app_check_link(): query every configured link's status via
 * rte_eth_link_get() (a blocking call), log speed and UP/DOWN state,
 * and panic if any link reports ETH_LINK_DOWN.
 * NOTE(review): extraction-damaged fragment; original line numbers are
 * fused into the text. Do not compile as-is.
 */
830 app_check_link(struct app_params *app)
832 uint32_t all_links_up, i;
836 for (i = 0; i < app->n_links; i++) {
837 struct app_link_params *p = &app->link_params[i];
838 struct rte_eth_link link_params;
840 memset(&link_params, 0, sizeof(link_params));
841 rte_eth_link_get(p->pmd_id, &link_params);
843 APP_LOG(app, HIGH, "%s (%" PRIu32 ") (%" PRIu32 " Gbps) %s",
846 link_params.link_speed / 1000,
847 link_params.link_status ? "UP" : "DOWN");
849 if (link_params.link_status == ETH_LINK_DOWN)
853 if (all_links_up == 0)
854 rte_panic("Some links are DOWN\n");
/*
 * is_any_swq_frag_or_ras(): predicate — true when any software queue is
 * configured for IPv4/IPv6 fragmentation or reassembly.
 * NOTE(review): extraction-damaged fragment; original line numbers are
 * fused into the text. Do not compile as-is.
 */
858 is_any_swq_frag_or_ras(struct app_params *app)
862 for (i = 0; i < app->n_pktq_swq; i++) {
863 struct app_pktq_swq_params *p = &app->swq_params[i];
865 if ((p->ipv4_frag == 1) || (p->ipv6_frag == 1) ||
866 (p->ipv4_ras == 1) || (p->ipv6_ras == 1))
/*
 * app_init_link_frag_ras(): if any SWQ does fragmentation/reassembly,
 * clear ETH_TXQ_FLAGS_NOMULTSEGS on every TX queue so multi-segment
 * mbufs can be transmitted.
 * NOTE(review): extraction-damaged fragment; original line numbers are
 * fused into the text. Do not compile as-is.
 */
874 app_init_link_frag_ras(struct app_params *app)
878 if (is_any_swq_frag_or_ras(app)) {
879 for (i = 0; i < app->n_pktq_hwq_out; i++) {
880 struct app_pktq_hwq_out_params *p_txq = &app->hwq_out_params[i];
882 p_txq->conf.txq_flags &= ~ETH_TXQ_FLAGS_NOMULTSEGS;
/*
 * app_get_cpu_socket_id(): NUMA socket of the given port, defaulting to
 * socket 0 when the device reports SOCKET_ID_ANY.
 * NOTE(review): extraction-damaged fragment; original line numbers are
 * fused into the text. Do not compile as-is.
 */
888 app_get_cpu_socket_id(uint32_t pmd_id)
890 int status = rte_eth_dev_socket_id(pmd_id);
892 return (status != SOCKET_ID_ANY) ? status : 0;
/*
 * app_link_rss_enabled(): 1 when the link has at least one RSS queue
 * configured, 0 otherwise.
 * NOTE(review): extraction-damaged fragment; original line numbers are
 * fused into the text. Do not compile as-is.
 */
896 app_link_rss_enabled(struct app_link_params *cp)
898 return (cp->n_rss_qs) ? 1 : 0;
/*
 * app_link_rss_setup(): program the port's RSS redirection table —
 * validate the device RETA size (non-zero, <= 512), enable every RETA
 * entry's mask bit, spread the configured rss_qs round-robin across the
 * table, and push it with rte_eth_dev_rss_reta_update(), panicking on
 * any failure.
 * NOTE(review): extraction-damaged fragment; original line numbers are
 * fused into the text. Do not compile as-is.
 */
902 app_link_rss_setup(struct app_link_params *cp)
904 struct rte_eth_dev_info dev_info;
905 struct rte_eth_rss_reta_entry64 reta_conf[APP_RETA_SIZE_MAX];
910 memset(&dev_info, 0, sizeof(dev_info));
911 rte_eth_dev_info_get(cp->pmd_id, &dev_info);
913 if (dev_info.reta_size == 0)
914 rte_panic("%s (%u): RSS setup error (null RETA size)\n",
915 cp->name, cp->pmd_id);
917 if (dev_info.reta_size > ETH_RSS_RETA_SIZE_512)
918 rte_panic("%s (%u): RSS setup error (RETA size too big)\n",
919 cp->name, cp->pmd_id);
921 /* Setup RETA contents */
922 memset(reta_conf, 0, sizeof(reta_conf));
924 for (i = 0; i < dev_info.reta_size; i++)
925 reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
927 for (i = 0; i < dev_info.reta_size; i++) {
928 uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
929 uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
930 uint32_t rss_qs_pos = i % cp->n_rss_qs;
932 reta_conf[reta_id].reta[reta_pos] =
933 (uint16_t) cp->rss_qs[rss_qs_pos];
937 status = rte_eth_dev_rss_reta_update(cp->pmd_id,
941 rte_panic("%s (%u): RSS setup error (RETA update failed)\n",
942 cp->name, cp->pmd_id);
/*
 * app_init_link_set_config(): enable RSS multi-queue RX mode and build
 * the RSS hash-function mask from the link's per-protocol settings.
 * NOTE(review): extraction-damaged fragment; original line numbers are
 * fused into the text. Do not compile as-is.
 */
946 app_init_link_set_config(struct app_link_params *p)
949 p->conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
950 p->conf.rx_adv_conf.rss_conf.rss_hf = p->rss_proto_ipv4 |
/*
 * app_init_link(): full per-port bring-up — for every link: configure
 * the device (rte_eth_dev_configure), read its MAC, enable promiscuous
 * mode, set up each matching RX and TX queue (adjusting descriptor
 * counts first via rte_eth_dev_adjust_nb_rx_tx_desc), start the port,
 * install ARP/TCP-SYN filters, program RSS if enabled, and bring the
 * link up; finally verify all links with app_check_link(). Panics on
 * any error.
 * NOTE(review): extraction-damaged fragment — many interior lines
 * (argument lists, status checks, closing braces) are missing and each
 * surviving line is prefixed with its original file line number. Do not
 * compile as-is.
 */
957 app_init_link(struct app_params *app)
961 app_init_link_frag_ras(app);
963 for (i = 0; i < app->n_links; i++) {
964 struct app_link_params *p_link = &app->link_params[i];
965 uint32_t link_id, n_hwq_in, n_hwq_out, j;
968 sscanf(p_link->name, "LINK%" PRIu32, &link_id);
969 n_hwq_in = app_link_get_n_rxq(app, p_link);
970 n_hwq_out = app_link_get_n_txq(app, p_link);
971 app_init_link_set_config(p_link);
973 APP_LOG(app, HIGH, "Initializing %s (%" PRIu32") "
974 "(%" PRIu32 " RXQ, %" PRIu32 " TXQ) ...",
981 status = rte_eth_dev_configure(
987 rte_panic("%s (%" PRId32 "): "
988 "init error (%" PRId32 ")\n",
989 p_link->name, p_link->pmd_id, status);
991 rte_eth_macaddr_get(p_link->pmd_id,
992 (struct ether_addr *) &p_link->mac_addr);
995 rte_eth_promiscuous_enable(p_link->pmd_id);
998 for (j = 0; j < app->n_pktq_hwq_in; j++) {
999 struct app_pktq_hwq_in_params *p_rxq =
1000 &app->hwq_in_params[j];
1001 uint32_t rxq_link_id, rxq_queue_id;
1002 uint16_t nb_rxd = p_rxq->size;
1004 sscanf(p_rxq->name, "RXQ%" PRIu32 ".%" PRIu32,
1005 &rxq_link_id, &rxq_queue_id);
1006 if (rxq_link_id != link_id)
1009 status = rte_eth_dev_adjust_nb_rx_tx_desc(
1014 rte_panic("%s (%" PRIu32 "): "
1015 "%s adjust number of Rx descriptors "
1016 "error (%" PRId32 ")\n",
1022 status = rte_eth_rx_queue_setup(
1026 app_get_cpu_socket_id(p_link->pmd_id),
1028 app->mempool[p_rxq->mempool_id]);
1030 rte_panic("%s (%" PRIu32 "): "
1031 "%s init error (%" PRId32 ")\n",
1039 for (j = 0; j < app->n_pktq_hwq_out; j++) {
1040 struct app_pktq_hwq_out_params *p_txq =
1041 &app->hwq_out_params[j];
1042 uint32_t txq_link_id, txq_queue_id;
1043 uint16_t nb_txd = p_txq->size;
1045 sscanf(p_txq->name, "TXQ%" PRIu32 ".%" PRIu32,
1046 &txq_link_id, &txq_queue_id);
1047 if (txq_link_id != link_id)
1050 status = rte_eth_dev_adjust_nb_rx_tx_desc(
1055 rte_panic("%s (%" PRIu32 "): "
1056 "%s adjust number of Tx descriptors "
1057 "error (%" PRId32 ")\n",
1063 status = rte_eth_tx_queue_setup(
1067 app_get_cpu_socket_id(p_link->pmd_id),
1070 rte_panic("%s (%" PRIu32 "): "
1071 "%s init error (%" PRId32 ")\n",
1079 status = rte_eth_dev_start(p_link->pmd_id);
1081 rte_panic("Cannot start %s (error %" PRId32 ")\n",
1082 p_link->name, status);
1085 app_link_set_arp_filter(app, p_link);
1086 app_link_set_tcp_syn_filter(app, p_link);
1087 if (app_link_rss_enabled(p_link))
1088 app_link_rss_setup(p_link);
1091 app_link_up_internal(app, p_link);
1094 app_check_link(app);
/*
 * app_init_swq(): create one rte_ring per software queue; set
 * single-consumer / single-producer flags when exactly one reader /
 * writer is configured. Panics on allocation failure.
 * NOTE(review): extraction-damaged fragment; original line numbers are
 * fused into the text. Do not compile as-is.
 */
1098 app_init_swq(struct app_params *app)
1102 for (i = 0; i < app->n_pktq_swq; i++) {
1103 struct app_pktq_swq_params *p = &app->swq_params[i];
1106 if (app_swq_get_readers(app, p) == 1)
1107 flags |= RING_F_SC_DEQ;
1108 if (app_swq_get_writers(app, p) == 1)
1109 flags |= RING_F_SP_ENQ;
1111 APP_LOG(app, HIGH, "Initializing %s...", p->name);
1112 app->swq[i] = rte_ring_create(
1118 if (app->swq[i] == NULL)
1119 rte_panic("%s init error\n", p->name);
/*
 * app_init_tm(): initialize traffic management — for each TM queue,
 * derive the scheduler rate from the underlying link speed (bps/8),
 * create the rte_sched port, then configure every subport and, for each
 * pipe with a valid profile (-1 means unconfigured), the pipe itself.
 * Panics on any configuration error.
 * NOTE(review): extraction-damaged fragment — interior lines (status
 * checks, loop headers, closing braces) are missing and each surviving
 * line is prefixed with its original file line number. Do not compile
 * as-is.
 */
1124 app_init_tm(struct app_params *app)
1128 for (i = 0; i < app->n_pktq_tm; i++) {
1129 struct app_pktq_tm_params *p_tm = &app->tm_params[i];
1130 struct app_link_params *p_link;
1131 struct rte_eth_link link_eth_params;
1132 struct rte_sched_port *sched;
1133 uint32_t n_subports, subport_id;
1136 p_link = app_get_link_for_tm(app, p_tm);
1138 rte_eth_link_get(p_link->pmd_id, &link_eth_params);
1141 p_tm->sched_port_params.name = p_tm->name;
1142 p_tm->sched_port_params.socket =
1143 app_get_cpu_socket_id(p_link->pmd_id);
1144 p_tm->sched_port_params.rate =
1145 (uint64_t) link_eth_params.link_speed * 1000 * 1000 / 8;
1147 APP_LOG(app, HIGH, "Initializing %s ...", p_tm->name);
1148 sched = rte_sched_port_config(&p_tm->sched_port_params);
1150 rte_panic("%s init error\n", p_tm->name);
1154 n_subports = p_tm->sched_port_params.n_subports_per_port;
1155 for (subport_id = 0; subport_id < n_subports; subport_id++) {
1156 uint32_t n_pipes_per_subport, pipe_id;
1158 status = rte_sched_subport_config(sched,
1160 &p_tm->sched_subport_params[subport_id]);
1162 rte_panic("%s subport %" PRIu32
1163 " init error (%" PRId32 ")\n",
1164 p_tm->name, subport_id, status);
1167 n_pipes_per_subport =
1168 p_tm->sched_port_params.n_pipes_per_subport;
1170 pipe_id < n_pipes_per_subport;
1172 int profile_id = p_tm->sched_pipe_to_profile[
1173 subport_id * APP_MAX_SCHED_PIPES +
1176 if (profile_id == -1)
1179 status = rte_sched_pipe_config(sched,
1184 rte_panic("%s subport %" PRIu32
1186 " (profile %" PRId32 ") "
1187 "init error (% " PRId32 ")\n",
1188 p_tm->name, subport_id, pipe_id,
1189 profile_id, status);
/*
 * app_init_tap(): two build variants. Non-Linux: a stub that panics if
 * any TAP queue is configured. Linux: for each TAP queue, open
 * /dev/net/tun non-blocking and create the named TAP interface via
 * ioctl(TUNSETIFF) with IFF_TAP | IFF_NO_PI, panicking on failure.
 * NOTE(review): extraction-damaged fragment — interior lines (the
 * matching #else/#endif, fd storage into app->tap[]) are missing and
 * each surviving line is prefixed with its original file line number.
 * Do not compile as-is.
 */
1195 #ifndef RTE_EXEC_ENV_LINUXAPP
1197 app_init_tap(struct app_params *app) {
1198 if (app->n_pktq_tap == 0)
1201 rte_panic("TAP device not supported.\n");
1205 app_init_tap(struct app_params *app)
1209 for (i = 0; i < app->n_pktq_tap; i++) {
1210 struct app_pktq_tap_params *p_tap = &app->tap_params[i];
1214 APP_LOG(app, HIGH, "Initializing %s ...", p_tap->name);
1216 fd = open("/dev/net/tun", O_RDWR | O_NONBLOCK);
1218 rte_panic("Cannot open file /dev/net/tun\n");
1220 memset(&ifr, 0, sizeof(ifr));
1221 ifr.ifr_flags = IFF_TAP | IFF_NO_PI; /* No packet information */
1222 snprintf(ifr.ifr_name, IFNAMSIZ, "%s", p_tap->name);
1224 status = ioctl(fd, TUNSETIFF, (void *) &ifr);
1226 rte_panic("TAP setup error\n");
/*
 * KNI kernel-request callbacks (compiled only with RTE_LIBRTE_KNI):
 * - kni_config_network_interface(): validate the port id, then set the
 *   ethdev link up or down to mirror the kernel interface state.
 * - kni_change_mtu(): validate the port id, reject MTUs above
 *   ETHER_MAX_LEN, and apply the new MTU via rte_eth_dev_set_mtu().
 * NOTE(review): extraction-damaged fragment — return statements and
 * braces are missing and each surviving line is prefixed with its
 * original file line number. Do not compile as-is.
 */
1233 #ifdef RTE_LIBRTE_KNI
1235 kni_config_network_interface(uint16_t port_id, uint8_t if_up) {
1238 if (port_id >= rte_eth_dev_count())
1242 rte_eth_dev_set_link_up(port_id) :
1243 rte_eth_dev_set_link_down(port_id);
1249 kni_change_mtu(uint16_t port_id, unsigned int new_mtu) {
1252 if (port_id >= rte_eth_dev_count())
1255 if (new_mtu > ETHER_MAX_LEN)
1259 ret = rte_eth_dev_set_mtu(port_id, new_mtu);
1265 #endif /* RTE_LIBRTE_KNI */
/*
 * app_init_kni(): two build variants. Without RTE_LIBRTE_KNI: a stub
 * that panics if any KNI queue is configured. With it: initialize the
 * KNI subsystem, then for each KNI queue build an rte_kni_conf (name,
 * optional forced core binding resolved through the core map, group id
 * = port id, mbuf size, PCI addr/id from dev_info) plus rte_kni_ops
 * wired to the callbacks above, and allocate the KNI device, panicking
 * on failure.
 * NOTE(review): extraction-damaged fragment — interior lines (the
 * #else, closing braces, some arguments) are missing and each surviving
 * line is prefixed with its original file line number. Do not compile
 * as-is.
 */
1267 #ifndef RTE_LIBRTE_KNI
1269 app_init_kni(struct app_params *app) {
1270 if (app->n_pktq_kni == 0)
1273 rte_panic("Can not init KNI without librte_kni support.\n");
1277 app_init_kni(struct app_params *app) {
1280 if (app->n_pktq_kni == 0)
1283 rte_kni_init(app->n_pktq_kni);
1285 for (i = 0; i < app->n_pktq_kni; i++) {
1286 struct app_pktq_kni_params *p_kni = &app->kni_params[i];
1287 struct app_link_params *p_link;
1288 struct rte_eth_dev_info dev_info;
1289 struct app_mempool_params *mempool_params;
1290 struct rte_mempool *mempool;
1291 struct rte_kni_conf conf;
1292 struct rte_kni_ops ops;
1295 p_link = app_get_link_for_kni(app, p_kni);
1296 memset(&dev_info, 0, sizeof(dev_info));
1297 rte_eth_dev_info_get(p_link->pmd_id, &dev_info);
1300 mempool_params = &app->mempool_params[p_kni->mempool_id];
1301 mempool = app->mempool[p_kni->mempool_id];
1304 memset(&conf, 0, sizeof(conf));
1305 snprintf(conf.name, RTE_KNI_NAMESIZE, "%s", p_kni->name);
1306 conf.force_bind = p_kni->force_bind;
1307 if (conf.force_bind) {
1310 lcore_id = cpu_core_map_get_lcore_id(app->core_map,
1313 p_kni->hyper_th_id);
1316 rte_panic("%s invalid CPU core\n", p_kni->name);
1318 conf.core_id = (uint32_t) lcore_id;
1320 conf.group_id = p_link->pmd_id;
1321 conf.mbuf_size = mempool_params->buffer_size;
1322 conf.addr = dev_info.pci_dev->addr;
1323 conf.id = dev_info.pci_dev->id;
1325 memset(&ops, 0, sizeof(ops));
1326 ops.port_id = (uint8_t) p_link->pmd_id;
1327 ops.change_mtu = kni_change_mtu;
1328 ops.config_network_if = kni_config_network_interface;
1330 APP_LOG(app, HIGH, "Initializing %s ...", p_kni->name);
1331 app->kni[i] = rte_kni_alloc(mempool, &conf, &ops);
1333 rte_panic("%s init error\n", p_kni->name);
1336 #endif /* RTE_LIBRTE_KNI */
/*
 * app_init_msgq(): create one single-producer/single-consumer rte_ring
 * per message queue, panicking on allocation failure.
 * NOTE(review): extraction-damaged fragment; original line numbers are
 * fused into the text. Do not compile as-is.
 */
1339 app_init_msgq(struct app_params *app)
1343 for (i = 0; i < app->n_msgq; i++) {
1344 struct app_msgq_params *p = &app->msgq_params[i];
1346 APP_LOG(app, HIGH, "Initializing %s ...", p->name);
1347 app->msgq[i] = rte_ring_create(
1351 RING_F_SP_ENQ | RING_F_SC_DEQ);
1353 if (app->msgq[i] == NULL)
1354 rte_panic("%s init error\n", p->name);
1358 void app_pipeline_params_get(struct app_params *app,
1359 struct app_pipeline_params *p_in,
1360 struct pipeline_params *p_out)
1364 snprintf(p_out->name, PIPELINE_NAME_SIZE, "%s", p_in->name);
1366 snprintf(p_out->type, PIPELINE_TYPE_SIZE, "%s", p_in->type);
1368 p_out->socket_id = (int) p_in->socket_id;
1370 p_out->log_level = app->log_level;
1373 p_out->n_ports_in = p_in->n_pktq_in;
1374 for (i = 0; i < p_in->n_pktq_in; i++) {
1375 struct app_pktq_in_params *in = &p_in->pktq_in[i];
1376 struct pipeline_port_in_params *out = &p_out->port_in[i];
1379 case APP_PKTQ_IN_HWQ:
1381 struct app_pktq_hwq_in_params *p_hwq_in =
1382 &app->hwq_in_params[in->id];
1383 struct app_link_params *p_link =
1384 app_get_link_for_rxq(app, p_hwq_in);
1385 uint32_t rxq_link_id, rxq_queue_id;
1387 sscanf(p_hwq_in->name, "RXQ%" SCNu32 ".%" SCNu32,
1391 out->type = PIPELINE_PORT_IN_ETHDEV_READER;
1392 out->params.ethdev.port_id = p_link->pmd_id;
1393 out->params.ethdev.queue_id = rxq_queue_id;
1394 out->burst_size = p_hwq_in->burst;
1397 case APP_PKTQ_IN_SWQ:
1399 struct app_pktq_swq_params *swq_params = &app->swq_params[in->id];
1401 if ((swq_params->ipv4_frag == 0) && (swq_params->ipv6_frag == 0)) {
1402 if (app_swq_get_readers(app, swq_params) == 1) {
1403 out->type = PIPELINE_PORT_IN_RING_READER;
1404 out->params.ring.ring = app->swq[in->id];
1405 out->burst_size = app->swq_params[in->id].burst_read;
1407 out->type = PIPELINE_PORT_IN_RING_MULTI_READER;
1408 out->params.ring_multi.ring = app->swq[in->id];
1409 out->burst_size = swq_params->burst_read;
1412 if (swq_params->ipv4_frag == 1) {
1413 struct rte_port_ring_reader_ipv4_frag_params *params =
1414 &out->params.ring_ipv4_frag;
1416 out->type = PIPELINE_PORT_IN_RING_READER_IPV4_FRAG;
1417 params->ring = app->swq[in->id];
1418 params->mtu = swq_params->mtu;
1419 params->metadata_size = swq_params->metadata_size;
1420 params->pool_direct =
1421 app->mempool[swq_params->mempool_direct_id];
1422 params->pool_indirect =
1423 app->mempool[swq_params->mempool_indirect_id];
1424 out->burst_size = swq_params->burst_read;
1426 struct rte_port_ring_reader_ipv6_frag_params *params =
1427 &out->params.ring_ipv6_frag;
1429 out->type = PIPELINE_PORT_IN_RING_READER_IPV6_FRAG;
1430 params->ring = app->swq[in->id];
1431 params->mtu = swq_params->mtu;
1432 params->metadata_size = swq_params->metadata_size;
1433 params->pool_direct =
1434 app->mempool[swq_params->mempool_direct_id];
1435 params->pool_indirect =
1436 app->mempool[swq_params->mempool_indirect_id];
1437 out->burst_size = swq_params->burst_read;
1442 case APP_PKTQ_IN_TM:
1444 out->type = PIPELINE_PORT_IN_SCHED_READER;
1445 out->params.sched.sched = app->tm[in->id];
1446 out->burst_size = app->tm_params[in->id].burst_read;
1449 #ifdef RTE_EXEC_ENV_LINUXAPP
1450 case APP_PKTQ_IN_TAP:
1452 struct app_pktq_tap_params *tap_params =
1453 &app->tap_params[in->id];
1454 struct app_mempool_params *mempool_params =
1455 &app->mempool_params[tap_params->mempool_id];
1456 struct rte_mempool *mempool =
1457 app->mempool[tap_params->mempool_id];
1459 out->type = PIPELINE_PORT_IN_FD_READER;
1460 out->params.fd.fd = app->tap[in->id];
1461 out->params.fd.mtu = mempool_params->buffer_size;
1462 out->params.fd.mempool = mempool;
1463 out->burst_size = app->tap_params[in->id].burst_read;
1467 #ifdef RTE_LIBRTE_KNI
1468 case APP_PKTQ_IN_KNI:
1470 out->type = PIPELINE_PORT_IN_KNI_READER;
1471 out->params.kni.kni = app->kni[in->id];
1472 out->burst_size = app->kni_params[in->id].burst_read;
1475 #endif /* RTE_LIBRTE_KNI */
1476 case APP_PKTQ_IN_SOURCE:
1478 uint32_t mempool_id =
1479 app->source_params[in->id].mempool_id;
1481 out->type = PIPELINE_PORT_IN_SOURCE;
1482 out->params.source.mempool = app->mempool[mempool_id];
1483 out->burst_size = app->source_params[in->id].burst;
1484 out->params.source.file_name =
1485 app->source_params[in->id].file_name;
1486 out->params.source.n_bytes_per_pkt =
1487 app->source_params[in->id].n_bytes_per_pkt;
1496 p_out->n_ports_out = p_in->n_pktq_out;
1497 for (i = 0; i < p_in->n_pktq_out; i++) {
1498 struct app_pktq_out_params *in = &p_in->pktq_out[i];
1499 struct pipeline_port_out_params *out = &p_out->port_out[i];
1502 case APP_PKTQ_OUT_HWQ:
1504 struct app_pktq_hwq_out_params *p_hwq_out =
1505 &app->hwq_out_params[in->id];
1506 struct app_link_params *p_link =
1507 app_get_link_for_txq(app, p_hwq_out);
1508 uint32_t txq_link_id, txq_queue_id;
1510 sscanf(p_hwq_out->name,
1511 "TXQ%" SCNu32 ".%" SCNu32,
1515 if (p_hwq_out->dropless == 0) {
1516 struct rte_port_ethdev_writer_params *params =
1517 &out->params.ethdev;
1519 out->type = PIPELINE_PORT_OUT_ETHDEV_WRITER;
1520 params->port_id = p_link->pmd_id;
1521 params->queue_id = txq_queue_id;
1522 params->tx_burst_sz =
1523 app->hwq_out_params[in->id].burst;
1525 struct rte_port_ethdev_writer_nodrop_params
1526 *params = &out->params.ethdev_nodrop;
1529 PIPELINE_PORT_OUT_ETHDEV_WRITER_NODROP;
1530 params->port_id = p_link->pmd_id;
1531 params->queue_id = txq_queue_id;
1532 params->tx_burst_sz = p_hwq_out->burst;
1533 params->n_retries = p_hwq_out->n_retries;
1537 case APP_PKTQ_OUT_SWQ:
1539 struct app_pktq_swq_params *swq_params = &app->swq_params[in->id];
1541 if ((swq_params->ipv4_ras == 0) && (swq_params->ipv6_ras == 0)) {
1542 if (app_swq_get_writers(app, swq_params) == 1) {
1543 if (app->swq_params[in->id].dropless == 0) {
1544 struct rte_port_ring_writer_params *params =
1547 out->type = PIPELINE_PORT_OUT_RING_WRITER;
1548 params->ring = app->swq[in->id];
1549 params->tx_burst_sz =
1550 app->swq_params[in->id].burst_write;
1552 struct rte_port_ring_writer_nodrop_params
1553 *params = &out->params.ring_nodrop;
1556 PIPELINE_PORT_OUT_RING_WRITER_NODROP;
1557 params->ring = app->swq[in->id];
1558 params->tx_burst_sz =
1559 app->swq_params[in->id].burst_write;
1561 app->swq_params[in->id].n_retries;
1564 if (swq_params->dropless == 0) {
1565 struct rte_port_ring_multi_writer_params *params =
1566 &out->params.ring_multi;
1568 out->type = PIPELINE_PORT_OUT_RING_MULTI_WRITER;
1569 params->ring = app->swq[in->id];
1570 params->tx_burst_sz = swq_params->burst_write;
1572 struct rte_port_ring_multi_writer_nodrop_params
1573 *params = &out->params.ring_multi_nodrop;
1575 out->type = PIPELINE_PORT_OUT_RING_MULTI_WRITER_NODROP;
1576 params->ring = app->swq[in->id];
1577 params->tx_burst_sz = swq_params->burst_write;
1578 params->n_retries = swq_params->n_retries;
1582 if (swq_params->ipv4_ras == 1) {
1583 struct rte_port_ring_writer_ipv4_ras_params *params =
1584 &out->params.ring_ipv4_ras;
1586 out->type = PIPELINE_PORT_OUT_RING_WRITER_IPV4_RAS;
1587 params->ring = app->swq[in->id];
1588 params->tx_burst_sz = swq_params->burst_write;
1590 struct rte_port_ring_writer_ipv6_ras_params *params =
1591 &out->params.ring_ipv6_ras;
1593 out->type = PIPELINE_PORT_OUT_RING_WRITER_IPV6_RAS;
1594 params->ring = app->swq[in->id];
1595 params->tx_burst_sz = swq_params->burst_write;
1600 case APP_PKTQ_OUT_TM:
1602 struct rte_port_sched_writer_params *params =
1605 out->type = PIPELINE_PORT_OUT_SCHED_WRITER;
1606 params->sched = app->tm[in->id];
1607 params->tx_burst_sz =
1608 app->tm_params[in->id].burst_write;
1611 #ifdef RTE_EXEC_ENV_LINUXAPP
1612 case APP_PKTQ_OUT_TAP:
1614 struct rte_port_fd_writer_params *params =
1617 out->type = PIPELINE_PORT_OUT_FD_WRITER;
1618 params->fd = app->tap[in->id];
1619 params->tx_burst_sz =
1620 app->tap_params[in->id].burst_write;
1624 #ifdef RTE_LIBRTE_KNI
1625 case APP_PKTQ_OUT_KNI:
1627 struct app_pktq_kni_params *p_kni =
1628 &app->kni_params[in->id];
1630 if (p_kni->dropless == 0) {
1631 struct rte_port_kni_writer_params *params =
1634 out->type = PIPELINE_PORT_OUT_KNI_WRITER;
1635 params->kni = app->kni[in->id];
1636 params->tx_burst_sz =
1637 app->kni_params[in->id].burst_write;
1639 struct rte_port_kni_writer_nodrop_params
1640 *params = &out->params.kni_nodrop;
1642 out->type = PIPELINE_PORT_OUT_KNI_WRITER_NODROP;
1643 params->kni = app->kni[in->id];
1644 params->tx_burst_sz =
1645 app->kni_params[in->id].burst_write;
1647 app->kni_params[in->id].n_retries;
1651 #endif /* RTE_LIBRTE_KNI */
1652 case APP_PKTQ_OUT_SINK:
1654 out->type = PIPELINE_PORT_OUT_SINK;
1655 out->params.sink.file_name =
1656 app->sink_params[in->id].file_name;
1657 out->params.sink.max_n_pkts =
1658 app->sink_params[in->id].
1669 p_out->n_msgq = p_in->n_msgq_in;
1671 for (i = 0; i < p_in->n_msgq_in; i++)
1672 p_out->msgq_in[i] = app->msgq[p_in->msgq_in[i]];
1674 for (i = 0; i < p_in->n_msgq_out; i++)
1675 p_out->msgq_out[i] = app->msgq[p_in->msgq_out[i]];
1678 p_out->n_args = p_in->n_args;
1679 for (i = 0; i < p_in->n_args; i++) {
1680 p_out->args_name[i] = p_in->args_name[i];
1681 p_out->args_value[i] = p_in->args_value[i];
/*
 * Instantiate every configured pipeline instance.
 *
 * For each entry in app->pipeline_params: resolve its registered pipeline
 * type by name, build the generic pipeline_params image via
 * app_pipeline_params_get(), then invoke the type's back-end and front-end
 * f_init hooks. Every failure path is fatal (rte_panic), so a bad
 * configuration aborts application start-up immediately.
 *
 * NOTE(review): this chunk is non-contiguous — braces, declarations and
 * some statements between the visible lines are elided from this view.
 */
1686 app_init_pipelines(struct app_params *app)
1690 for (p_id = 0; p_id < app->n_pipelines; p_id++) {
1691 struct app_pipeline_params *params =
1692 &app->pipeline_params[p_id];
1693 struct app_pipeline_data *data = &app->pipeline_data[p_id];
1694 struct pipeline_type *ptype;
1695 struct pipeline_params pp;
1697 APP_LOG(app, HIGH, "Initializing %s ...", params->name);
/* Resolve the pipeline type by name; the NULL check that triggers the
 * panic below sits on an elided line. */
1699 ptype = app_pipeline_type_find(app, params->type);
1701 rte_panic("Init error: Unknown pipeline type \"%s\"\n",
/* Build the generic port/msgq parameter image for this pipeline. */
1704 app_pipeline_params_get(app, params, &pp);
/* Back-end init (optional hook); a NULL result is fatal. */
1708 if (ptype->be_ops->f_init) {
1709 data->be = ptype->be_ops->f_init(&pp, (void *) app);
1711 if (data->be == NULL)
1712 rte_panic("Pipeline instance \"%s\" back-end "
1713 "init error\n", params->name);
/* Front-end init (optional hook); a NULL result is fatal. */
1718 if (ptype->fe_ops->f_init) {
1719 data->fe = ptype->fe_ops->f_init(&pp, (void *) app);
1721 if (data->fe == NULL)
1722 rte_panic("Pipeline instance \"%s\" front-end "
1723 "init error\n", params->name);
1726 data->ptype = ptype;
/* Convert the configured period to TSC cycles; the /100 suggests
 * params->timer_period is expressed in 1/100 s units — TODO confirm
 * against the config parser. */
1728 data->timer_period = (rte_get_tsc_hz() *
1729 params->timer_period) / 100;
/*
 * Run the optional front-end post-init hook of every pipeline.
 *
 * Pipelines whose type provides no f_post_init are skipped (presumably a
 * 'continue' on the elided line after the NULL test — verify); a failing
 * hook is fatal (rte_panic). The 'status' check guarding the panic is
 * also on an elided line.
 */
1734 app_post_init_pipelines(struct app_params *app)
1738 for (p_id = 0; p_id < app->n_pipelines; p_id++) {
1739 struct app_pipeline_params *params =
1740 &app->pipeline_params[p_id];
1741 struct app_pipeline_data *data = &app->pipeline_data[p_id];
/* No post-init hook for this type: nothing to do. */
1744 if (data->ptype->fe_ops->f_post_init == NULL)
1747 status = data->ptype->fe_ops->f_post_init(data->fe);
1749 rte_panic("Pipeline instance \"%s\" front-end "
1750 "post-init error\n", params->name);
/*
 * Bind every pipeline instance to the lcore (thread) that will run it.
 *
 * For each pipeline: map its (socket, core, hyper-thread) coordinates to
 * an lcore id, initialize that thread's timer deadline and headroom
 * accounting, look up its request/response message queues, then append
 * the pipeline to the thread's list — the ternary at the bottom selects
 * t->regular[] when the type has no custom f_run, t->custom[] otherwise.
 * All lookup failures are fatal (rte_panic).
 *
 * NOTE(review): non-contiguous view — guard conditions (e.g. the lcore-id
 * validity test before the first panic) and the n_regular/n_custom
 * increments sit on elided lines.
 */
1755 app_init_threads(struct app_params *app)
/* Common time base so all initial deadlines are relative to "now". */
1757 uint64_t time = rte_get_tsc_cycles();
1760 for (p_id = 0; p_id < app->n_pipelines; p_id++) {
1761 struct app_pipeline_params *params =
1762 &app->pipeline_params[p_id];
1763 struct app_pipeline_data *data = &app->pipeline_data[p_id];
1764 struct pipeline_type *ptype;
1765 struct app_thread_data *t;
1766 struct app_thread_pipeline_data *p;
/* Translate socket/core/hyper-thread coordinates to an lcore id;
 * middle arguments of the call are on elided lines. */
1769 lcore_id = cpu_core_map_get_lcore_id(app->core_map,
1772 params->hyper_th_id);
1775 rte_panic("Invalid core s%" PRIu32 "c%" PRIu32 "%s\n",
1778 (params->hyper_th_id) ? "h" : "");
1780 t = &app->thread_data[lcore_id];
/* The /1000 suggests APP_THREAD_TIMER_PERIOD is in milliseconds —
 * TODO confirm against the macro definition. */
1782 t->timer_period = (rte_get_tsc_hz() * APP_THREAD_TIMER_PERIOD) / 1000;
1783 t->thread_req_deadline = time + t->timer_period;
/* Reset headroom (idle-cycle) accounting for this thread. */
1785 t->headroom_cycles = 0;
1786 t->headroom_time = rte_get_tsc_cycles();
1787 t->headroom_ratio = 0.0;
/* Per-thread request/response message queues; missing queues are a
 * fatal configuration error. */
1789 t->msgq_in = app_thread_msgq_in_get(app,
1792 params->hyper_th_id);
1793 if (t->msgq_in == NULL)
1794 rte_panic("Init error: Cannot find MSGQ_IN for thread %" PRId32,
1797 t->msgq_out = app_thread_msgq_out_get(app,
1800 params->hyper_th_id);
1801 if (t->msgq_out == NULL)
1802 rte_panic("Init error: Cannot find MSGQ_OUT for thread %" PRId32,
1805 ptype = app_pipeline_type_find(app, params->type);
1807 rte_panic("Init error: Unknown pipeline "
1808 "type \"%s\"\n", params->type);
/* "regular" pipelines are driven by the generic run loop; types that
 * supply their own f_run go on the "custom" list. */
1810 p = (ptype->be_ops->f_run == NULL) ?
1811 &t->regular[t->n_regular] :
1812 &t->custom[t->n_custom];
1814 p->pipeline_id = p_id;
1816 p->f_run = ptype->be_ops->f_run;
1817 p->f_timer = ptype->be_ops->f_timer;
1818 p->timer_period = data->timer_period;
1819 p->deadline = time + data->timer_period;
/* Bump the matching counter (increments on elided lines). */
1823 if (ptype->be_ops->f_run == NULL)
/*
 * Top-level application initialization sequence.
 *
 * Builds the CPU core map/mask, creates the packet mempools, pushes the
 * generic and thread-control CLI commands, registers every built-in
 * pipeline type, then instantiates the pipelines and binds them to their
 * threads. Several init calls between the visible lines (e.g. link/queue
 * setup implied by the elided line numbers 1837-1843) are not shown in
 * this view; the return statement is also elided.
 */
1830 int app_init(struct app_params *app)
1832 app_init_core_map(app);
1833 app_init_core_mask(app);
1836 app_init_mempool(app);
/* CLI: generic + per-thread commands, then one register call per
 * built-in pipeline type (master, passthrough, classification, ...). */
1844 app_pipeline_common_cmd_push(app);
1845 app_pipeline_thread_cmd_push(app);
1846 app_pipeline_type_register(app, &pipeline_master);
1847 app_pipeline_type_register(app, &pipeline_passthrough);
1848 app_pipeline_type_register(app, &pipeline_flow_classification);
1849 app_pipeline_type_register(app, &pipeline_flow_actions);
1850 app_pipeline_type_register(app, &pipeline_firewall);
1851 app_pipeline_type_register(app, &pipeline_routing);
/* Instantiate pipelines, then map them onto lcores. */
1853 app_init_pipelines(app);
1854 app_init_threads(app);
/*
 * Post-init phase: run the per-pipeline front-end post-init hooks.
 * The return statement is on an elided line.
 */
1859 int app_post_init(struct app_params *app)
1861 app_post_init_pipelines(app);
/*
 * Append a pipeline type's CLI commands to the application-wide
 * command array.
 *
 * Copies the type's cmdline_parse_ctx_t entries into app->cmds, points
 * each command's 'data' cookie back at the app context so handlers can
 * reach global state, and keeps the array NULL-terminated. Early-return
 * paths after the argument/capacity checks are on elided lines —
 * presumably bail-outs; verify against the full source.
 */
1867 app_pipeline_type_cmd_push(struct app_params *app,
1868 struct pipeline_type *ptype)
1870 cmdline_parse_ctx_t *cmds;
1873 /* Check input arguments */
1874 if ((app == NULL) ||
1878 n_cmds = pipeline_type_cmds_count(ptype);
1882 cmds = ptype->fe_ops->cmds;
1884 /* Check for available slots in the application commands array */
1885 if (n_cmds > APP_MAX_CMDS - app->n_cmds)
1888 /* Push pipeline commands into the application */
1889 memcpy(&app->cmds[app->n_cmds],
1891 n_cmds * sizeof(cmdline_parse_ctx_t));
/* Point every copied command at the app context. */
1893 for (i = 0; i < n_cmds; i++)
1894 app->cmds[app->n_cmds + i]->data = app;
/* Advance the count and re-terminate the array. */
1896 app->n_cmds += n_cmds;
1897 app->cmds[app->n_cmds] = NULL;
/*
 * Register a pipeline type with the application.
 *
 * Validates the descriptor (non-empty name, mandatory back-end
 * f_init/f_timer hooks), rejects duplicate names, checks that both the
 * type registry and the CLI command array have room, then copies the
 * descriptor into app->pipeline_type[] and pushes its CLI commands.
 * Error paths (elided lines) presumably return early with an error
 * code — TODO confirm.
 */
1903 app_pipeline_type_register(struct app_params *app, struct pipeline_type *ptype)
1907 /* Check input arguments */
1908 if ((app == NULL) ||
1910 (ptype->name == NULL) ||
1911 (strlen(ptype->name) == 0) ||
1912 (ptype->be_ops->f_init == NULL) ||
1913 (ptype->be_ops->f_timer == NULL))
1916 /* Check for duplicate entry */
1917 for (i = 0; i < app->n_pipeline_types; i++)
1918 if (strcmp(app->pipeline_type[i].name, ptype->name) == 0)
1921 /* Check for resource availability */
1922 n_cmds = pipeline_type_cmds_count(ptype);
1923 if ((app->n_pipeline_types == APP_MAX_PIPELINE_TYPES) ||
1924 (n_cmds > APP_MAX_CMDS - app->n_cmds))
1927 /* Copy pipeline type */
1928 memcpy(&app->pipeline_type[app->n_pipeline_types++],
1930 sizeof(struct pipeline_type));
1932 /* Copy CLI commands */
1934 app_pipeline_type_cmd_push(app, ptype);
1940 pipeline_type *app_pipeline_type_find(struct app_params *app, char *name)
1944 for (i = 0; i < app->n_pipeline_types; i++)
1945 if (strcmp(app->pipeline_type[i].name, name) == 0)
1946 return &app->pipeline_type[i];