4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 #include <netinet/in.h>
39 #include <linux/if_tun.h>
41 #include <sys/ioctl.h>
44 #include <rte_cycles.h>
45 #include <rte_ethdev.h>
46 #include <rte_ether.h>
49 #include <rte_malloc.h>
53 #include "pipeline_common_fe.h"
54 #include "pipeline_master.h"
55 #include "pipeline_passthrough.h"
56 #include "pipeline_firewall.h"
57 #include "pipeline_flow_classification.h"
58 #include "pipeline_flow_actions.h"
59 #include "pipeline_routing.h"
60 #include "thread_fe.h"
/* Max length of application name strings. */
62 #define APP_NAME_SIZE 32
/* Number of 64-entry RETA groups needed to cover the largest (512-entry)
 * RSS redirection table. */
64 #define APP_RETA_SIZE_MAX (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
/*
 * Build the CPU core map used to translate (socket, core, hyper-thread)
 * tuples into EAL lcore ids. Panics on allocation failure.
 * NOTE(review): source is gapped (embedded line numbers jump); code lines
 * are preserved byte-for-byte, comments only added.
 */
67 app_init_core_map(struct app_params *app)
69 APP_LOG(app, HIGH, "Initializing CPU core map ...");
/* Hard-coded topology bounds — presumably max sockets / cores-per-socket /
 * hyper-threads-per-core plus a flags/offset arg; TODO confirm against
 * cpu_core_map_init() prototype. */
70 app->core_map = cpu_core_map_init(4, 32, 4, 0);
72 if (app->core_map == NULL)
73 rte_panic("Cannot create CPU core map\n");
/* Dump the full map only at the most verbose log level. */
75 if (app->log_level >= APP_LOG_LEVEL_LOW)
76 cpu_core_map_print(app->core_map);
/*
 * Compute the coremask (bit per lcore) covering every configured pipeline,
 * storing it in app->core_mask for later use when building EAL arguments.
 * Panics if any pipeline maps to an invalid lcore.
 */
80 app_init_core_mask(struct app_params *app)
85 for (i = 0; i < app->n_pipelines; i++) {
86 struct app_pipeline_params *p = &app->pipeline_params[i];
/* Resolve the pipeline's (socket, core, ht) placement to an lcore id;
 * the failure check between these lines is not visible in this excerpt. */
89 lcore_id = cpu_core_map_get_lcore_id(app->core_map,
95 rte_panic("Cannot create CPU core mask\n");
/* Set the bit for this lcore in the accumulated mask. */
97 mask |= 1LLU << lcore_id;
100 app->core_mask = mask;
101 APP_LOG(app, HIGH, "CPU core mask = 0x%016" PRIx64, app->core_mask);
/*
 * Translate the parsed [EAL] configuration section into a synthetic
 * argc/argv vector (app->eal_argc / app->eal_argv) and invoke
 * rte_eal_init() with it. Each argument string is heap-allocated via
 * strdup() and intentionally never freed (lives for process lifetime).
 * Panics if EAL initialization fails.
 */
105 app_init_eal(struct app_params *app)
108 struct app_eal_params *p = &app->eal_params;
/* argv[0]: application name. */
113 app->eal_argv[n_args++] = strdup(app->app_name);
/* -c: coremask computed by app_init_core_mask(). */
115 snprintf(buffer, sizeof(buffer), "-c%" PRIx64, app->core_mask);
116 app->eal_argv[n_args++] = strdup(buffer);
/* Optional --lcores mapping (guard condition not visible in excerpt). */
119 snprintf(buffer, sizeof(buffer), "--lcores=%s", p->coremap);
120 app->eal_argv[n_args++] = strdup(buffer);
123 if (p->master_lcore_present) {
126 "--master-lcore=%" PRIu32,
128 app->eal_argv[n_args++] = strdup(buffer);
/* -n: number of memory channels. */
131 snprintf(buffer, sizeof(buffer), "-n%" PRIu32, p->channels);
132 app->eal_argv[n_args++] = strdup(buffer);
134 if (p->memory_present) {
135 snprintf(buffer, sizeof(buffer), "-m%" PRIu32, p->memory);
136 app->eal_argv[n_args++] = strdup(buffer);
139 if (p->ranks_present) {
140 snprintf(buffer, sizeof(buffer), "-r%" PRIu32, p->ranks);
141 app->eal_argv[n_args++] = strdup(buffer);
/* One --pci-blacklist argument per configured blacklisted device. */
144 for (i = 0; i < APP_MAX_LINKS; i++) {
145 if (p->pci_blacklist[i] == NULL)
150 "--pci-blacklist=%s",
151 p->pci_blacklist[i]);
152 app->eal_argv[n_args++] = strdup(buffer);
/* Whitelist handling: explicit config entries when a port mask is set ... */
155 if (app->port_mask != 0)
156 for (i = 0; i < APP_MAX_LINKS; i++) {
157 if (p->pci_whitelist[i] == NULL)
162 "--pci-whitelist=%s",
163 p->pci_whitelist[i]);
164 app->eal_argv[n_args++] = strdup(buffer);
/* ... plus per-link PCI BDF whitelisting (enclosing guard not visible). */
167 for (i = 0; i < app->n_links; i++) {
168 char *pci_bdf = app->link_params[i].pci_bdf;
172 "--pci-whitelist=%s",
174 app->eal_argv[n_args++] = strdup(buffer);
/* One --vdev per configured virtual device. */
177 for (i = 0; i < APP_MAX_LINKS; i++) {
178 if (p->vdev[i] == NULL)
185 app->eal_argv[n_args++] = strdup(buffer);
/* Boolean EAL flags: emitted only when present AND enabled. */
188 if ((p->vmware_tsc_map_present) && p->vmware_tsc_map) {
189 snprintf(buffer, sizeof(buffer), "--vmware-tsc-map");
190 app->eal_argv[n_args++] = strdup(buffer);
198 app->eal_argv[n_args++] = strdup(buffer);
202 snprintf(buffer, sizeof(buffer), "--syslog=%s", p->syslog);
203 app->eal_argv[n_args++] = strdup(buffer);
206 if (p->log_level_present) {
209 "--log-level=%" PRIu32,
211 app->eal_argv[n_args++] = strdup(buffer);
214 if ((p->version_present) && p->version) {
215 snprintf(buffer, sizeof(buffer), "-v");
216 app->eal_argv[n_args++] = strdup(buffer);
219 if ((p->help_present) && p->help) {
220 snprintf(buffer, sizeof(buffer), "--help");
221 app->eal_argv[n_args++] = strdup(buffer);
224 if ((p->no_huge_present) && p->no_huge) {
225 snprintf(buffer, sizeof(buffer), "--no-huge");
226 app->eal_argv[n_args++] = strdup(buffer);
229 if ((p->no_pci_present) && p->no_pci) {
230 snprintf(buffer, sizeof(buffer), "--no-pci");
231 app->eal_argv[n_args++] = strdup(buffer);
234 if ((p->no_hpet_present) && p->no_hpet) {
235 snprintf(buffer, sizeof(buffer), "--no-hpet");
236 app->eal_argv[n_args++] = strdup(buffer);
239 if ((p->no_shconf_present) && p->no_shconf) {
240 snprintf(buffer, sizeof(buffer), "--no-shconf");
241 app->eal_argv[n_args++] = strdup(buffer);
/* -d: external driver shared object to load. */
245 snprintf(buffer, sizeof(buffer), "-d%s", p->add_driver);
246 app->eal_argv[n_args++] = strdup(buffer);
254 app->eal_argv[n_args++] = strdup(buffer);
258 snprintf(buffer, sizeof(buffer), "--huge-dir=%s", p->huge_dir);
259 app->eal_argv[n_args++] = strdup(buffer);
262 if (p->file_prefix) {
267 app->eal_argv[n_args++] = strdup(buffer);
270 if (p->base_virtaddr) {
273 "--base-virtaddr=%s",
275 app->eal_argv[n_args++] = strdup(buffer);
278 if ((p->create_uio_dev_present) && p->create_uio_dev) {
279 snprintf(buffer, sizeof(buffer), "--create-uio-dev");
280 app->eal_argv[n_args++] = strdup(buffer);
288 app->eal_argv[n_args++] = strdup(buffer);
291 if ((p->xen_dom0_present) && (p->xen_dom0)) {
292 snprintf(buffer, sizeof(buffer), "--xen-dom0");
293 app->eal_argv[n_args++] = strdup(buffer);
/* "--" terminator: everything after would be application arguments. */
296 snprintf(buffer, sizeof(buffer), "--");
297 app->eal_argv[n_args++] = strdup(buffer);
299 app->eal_argc = n_args;
301 APP_LOG(app, HIGH, "Initializing EAL ...");
/* At verbose log levels, echo the full synthesized argument string
 * (skipping argv[0]) before handing it to the EAL. */
302 if (app->log_level >= APP_LOG_LEVEL_LOW) {
305 fprintf(stdout, "[APP] EAL arguments: \"");
306 for (i = 1; i < app->eal_argc; i++)
307 fprintf(stdout, "%s ", app->eal_argv[i]);
308 fprintf(stdout, "\"\n");
311 status = rte_eal_init(app->eal_argc, app->eal_argv);
313 rte_panic("EAL init error\n");
/*
 * Create one rte_mempool of packet mbufs per [MEMPOOL] config section,
 * using the standard pktmbuf pool/obj init callbacks. Panics on failure.
 */
317 app_init_mempool(struct app_params *app)
321 for (i = 0; i < app->n_mempools; i++) {
322 struct app_mempool_params *p = &app->mempool_params[i];
324 APP_LOG(app, HIGH, "Initializing %s ...", p->name);
/* Size/cache/socket arguments come from config lines not visible here. */
325 app->mempool[i] = rte_mempool_create(
330 sizeof(struct rte_pktmbuf_pool_private),
331 rte_pktmbuf_pool_init, NULL,
332 rte_pktmbuf_init, NULL,
336 if (app->mempool[i] == NULL)
337 rte_panic("%s init error\n", p->name);
/*
 * Install a NIC ethertype filter steering ARP frames to the link's
 * dedicated ARP RX queue (link->arp_q). Returns the filter_ctrl status.
 */
342 app_link_filter_arp_add(struct app_link_params *link)
344 struct rte_eth_ethertype_filter filter = {
345 .ether_type = ETHER_TYPE_ARP,
347 .queue = link->arp_q,
350 return rte_eth_dev_filter_ctrl(link->pmd_id,
351 RTE_ETH_FILTER_ETHERTYPE,
/*
 * Install a NIC SYN filter steering TCP SYN packets to the link's
 * dedicated queue (link->tcp_syn_q). Returns the filter_ctrl status.
 */
357 app_link_filter_tcp_syn_add(struct app_link_params *link)
359 struct rte_eth_syn_filter filter = {
361 .queue = link->tcp_syn_q,
364 return rte_eth_dev_filter_ctrl(link->pmd_id,
/*
 * Add a 5-tuple filter on port l1 matching only destination IP == l2->ip
 * (all other tuple fields masked off), steering matches to l1's local-IP
 * queue. Priority 1 = lowest, so protocol-specific filters win.
 */
371 app_link_filter_ip_add(struct app_link_params *l1, struct app_link_params *l2)
373 struct rte_eth_ntuple_filter filter = {
374 .flags = RTE_5TUPLE_FLAGS,
/* Filter API expects the address in the opposite byte order to l2->ip. */
375 .dst_ip = rte_bswap32(l2->ip),
376 .dst_ip_mask = UINT32_MAX, /* Enable */
378 .src_ip_mask = 0, /* Disable */
380 .dst_port_mask = 0, /* Disable */
382 .src_port_mask = 0, /* Disable */
384 .proto_mask = 0, /* Disable */
386 .priority = 1, /* Lowest */
387 .queue = l1->ip_local_q,
390 return rte_eth_dev_filter_ctrl(l1->pmd_id,
391 RTE_ETH_FILTER_NTUPLE,
/*
 * Delete the 5-tuple IP filter previously installed by
 * app_link_filter_ip_add() — the filter struct must match the one added.
 */
397 app_link_filter_ip_del(struct app_link_params *l1, struct app_link_params *l2)
399 struct rte_eth_ntuple_filter filter = {
400 .flags = RTE_5TUPLE_FLAGS,
401 .dst_ip = rte_bswap32(l2->ip),
402 .dst_ip_mask = UINT32_MAX, /* Enable */
404 .src_ip_mask = 0, /* Disable */
406 .dst_port_mask = 0, /* Disable */
408 .src_port_mask = 0, /* Disable */
410 .proto_mask = 0, /* Disable */
412 .priority = 1, /* Lowest */
413 .queue = l1->ip_local_q,
416 return rte_eth_dev_filter_ctrl(l1->pmd_id,
417 RTE_ETH_FILTER_NTUPLE,
418 RTE_ETH_FILTER_DELETE,
/*
 * Add a 5-tuple filter on port l1 matching dst IP == l2->ip AND
 * protocol == TCP, steering matches to l1's local TCP queue.
 * Priority 2 so it overrides the plain IP filter.
 */
423 app_link_filter_tcp_add(struct app_link_params *l1, struct app_link_params *l2)
425 struct rte_eth_ntuple_filter filter = {
426 .flags = RTE_5TUPLE_FLAGS,
427 .dst_ip = rte_bswap32(l2->ip),
428 .dst_ip_mask = UINT32_MAX, /* Enable */
430 .src_ip_mask = 0, /* Disable */
432 .dst_port_mask = 0, /* Disable */
434 .src_port_mask = 0, /* Disable */
435 .proto = IPPROTO_TCP,
436 .proto_mask = UINT8_MAX, /* Enable */
438 .priority = 2, /* Higher priority than IP */
439 .queue = l1->tcp_local_q,
442 return rte_eth_dev_filter_ctrl(l1->pmd_id,
443 RTE_ETH_FILTER_NTUPLE,
/*
 * Delete the TCP 5-tuple filter previously installed by
 * app_link_filter_tcp_add() (identical match spec, DELETE op).
 */
449 app_link_filter_tcp_del(struct app_link_params *l1, struct app_link_params *l2)
451 struct rte_eth_ntuple_filter filter = {
452 .flags = RTE_5TUPLE_FLAGS,
453 .dst_ip = rte_bswap32(l2->ip),
454 .dst_ip_mask = UINT32_MAX, /* Enable */
456 .src_ip_mask = 0, /* Disable */
458 .dst_port_mask = 0, /* Disable */
460 .src_port_mask = 0, /* Disable */
461 .proto = IPPROTO_TCP,
462 .proto_mask = UINT8_MAX, /* Enable */
464 .priority = 2, /* Higher priority than IP */
465 .queue = l1->tcp_local_q,
468 return rte_eth_dev_filter_ctrl(l1->pmd_id,
469 RTE_ETH_FILTER_NTUPLE,
470 RTE_ETH_FILTER_DELETE,
/*
 * Add a 5-tuple filter on port l1 matching dst IP == l2->ip AND
 * protocol == UDP, steering matches to l1's local UDP queue.
 */
475 app_link_filter_udp_add(struct app_link_params *l1, struct app_link_params *l2)
477 struct rte_eth_ntuple_filter filter = {
478 .flags = RTE_5TUPLE_FLAGS,
479 .dst_ip = rte_bswap32(l2->ip),
480 .dst_ip_mask = UINT32_MAX, /* Enable */
482 .src_ip_mask = 0, /* Disable */
484 .dst_port_mask = 0, /* Disable */
486 .src_port_mask = 0, /* Disable */
487 .proto = IPPROTO_UDP,
488 .proto_mask = UINT8_MAX, /* Enable */
490 .priority = 2, /* Higher priority than IP */
491 .queue = l1->udp_local_q,
494 return rte_eth_dev_filter_ctrl(l1->pmd_id,
495 RTE_ETH_FILTER_NTUPLE,
/*
 * Delete the UDP 5-tuple filter previously installed by
 * app_link_filter_udp_add() (identical match spec, DELETE op).
 */
501 app_link_filter_udp_del(struct app_link_params *l1, struct app_link_params *l2)
503 struct rte_eth_ntuple_filter filter = {
504 .flags = RTE_5TUPLE_FLAGS,
505 .dst_ip = rte_bswap32(l2->ip),
506 .dst_ip_mask = UINT32_MAX, /* Enable */
508 .src_ip_mask = 0, /* Disable */
510 .dst_port_mask = 0, /* Disable */
512 .src_port_mask = 0, /* Disable */
513 .proto = IPPROTO_UDP,
514 .proto_mask = UINT8_MAX, /* Enable */
516 .priority = 2, /* Higher priority than IP */
517 .queue = l1->udp_local_q,
520 return rte_eth_dev_filter_ctrl(l1->pmd_id,
521 RTE_ETH_FILTER_NTUPLE,
522 RTE_ETH_FILTER_DELETE,
/*
 * Add a 5-tuple filter on port l1 matching dst IP == l2->ip AND
 * protocol == SCTP, steering matches to l1's local SCTP queue.
 */
527 app_link_filter_sctp_add(struct app_link_params *l1, struct app_link_params *l2)
529 struct rte_eth_ntuple_filter filter = {
530 .flags = RTE_5TUPLE_FLAGS,
531 .dst_ip = rte_bswap32(l2->ip),
532 .dst_ip_mask = UINT32_MAX, /* Enable */
534 .src_ip_mask = 0, /* Disable */
536 .dst_port_mask = 0, /* Disable */
538 .src_port_mask = 0, /* Disable */
539 .proto = IPPROTO_SCTP,
540 .proto_mask = UINT8_MAX, /* Enable */
542 .priority = 2, /* Higher priority than IP */
543 .queue = l1->sctp_local_q,
546 return rte_eth_dev_filter_ctrl(l1->pmd_id,
547 RTE_ETH_FILTER_NTUPLE,
/*
 * Delete the SCTP 5-tuple filter previously installed by
 * app_link_filter_sctp_add() (identical match spec, DELETE op).
 */
553 app_link_filter_sctp_del(struct app_link_params *l1, struct app_link_params *l2)
555 struct rte_eth_ntuple_filter filter = {
556 .flags = RTE_5TUPLE_FLAGS,
557 .dst_ip = rte_bswap32(l2->ip),
558 .dst_ip_mask = UINT32_MAX, /* Enable */
560 .src_ip_mask = 0, /* Disable */
562 .dst_port_mask = 0, /* Disable */
564 .src_port_mask = 0, /* Disable */
565 .proto = IPPROTO_SCTP,
566 .proto_mask = UINT8_MAX, /* Enable */
568 .priority = 2, /* Higher priority than IP */
569 .queue = l1->sctp_local_q,
572 return rte_eth_dev_filter_ctrl(l1->pmd_id,
573 RTE_ETH_FILTER_NTUPLE,
574 RTE_ETH_FILTER_DELETE,
/*
 * Conditionally install the ARP ethertype filter for a link: queue 0 means
 * "not configured" and is skipped; any failure is fatal (rte_panic).
 */
579 app_link_set_arp_filter(struct app_params *app, struct app_link_params *cp)
581 if (cp->arp_q != 0) {
582 int status = app_link_filter_arp_add(cp);
584 APP_LOG(app, LOW, "%s (%" PRIu32 "): "
585 "Adding ARP filter (queue = %" PRIu32 ")",
586 cp->name, cp->pmd_id, cp->arp_q);
/* The status check guarding this panic is not visible in this excerpt. */
589 rte_panic("%s (%" PRIu32 "): "
590 "Error adding ARP filter "
591 "(queue = %" PRIu32 ") (%" PRId32 ")\n",
592 cp->name, cp->pmd_id, cp->arp_q, status);
/*
 * Conditionally install the TCP SYN filter for a link: queue 0 means
 * "not configured" and is skipped; any failure is fatal (rte_panic).
 */
597 app_link_set_tcp_syn_filter(struct app_params *app, struct app_link_params *cp)
599 if (cp->tcp_syn_q != 0) {
600 int status = app_link_filter_tcp_syn_add(cp);
602 APP_LOG(app, LOW, "%s (%" PRIu32 "): "
603 "Adding TCP SYN filter (queue = %" PRIu32 ")",
604 cp->name, cp->pmd_id, cp->tcp_syn_q);
/* The status check guarding this panic is not visible in this excerpt. */
607 rte_panic("%s (%" PRIu32 "): "
608 "Error adding TCP SYN filter "
609 "(queue = %" PRIu32 ") (%" PRId32 ")\n",
610 cp->name, cp->pmd_id, cp->tcp_syn_q,
/*
 * Bring link cp administratively UP: for every configured link, install the
 * per-protocol (IP/TCP/UDP/SCTP) ntuple filters that steer traffic destined
 * to cp's IP into that link's local queues (queue id 0 = filter disabled),
 * then set the PMD link up. All filter or PMD errors are fatal.
 */
616 app_link_up_internal(struct app_params *app, struct app_link_params *cp)
621 /* For each link, add filters for IP of current link */
623 for (i = 0; i < app->n_links; i++) {
624 struct app_link_params *p = &app->link_params[i];
/* Local-IP (protocol-agnostic) filter, lowest priority. */
627 if (p->ip_local_q != 0) {
628 int status = app_link_filter_ip_add(p, cp);
630 APP_LOG(app, LOW, "%s (%" PRIu32 "): "
631 "Adding IP filter (queue= %" PRIu32
632 ", IP = 0x%08" PRIx32 ")",
633 p->name, p->pmd_id, p->ip_local_q,
637 rte_panic("%s (%" PRIu32 "): "
639 "filter (queue= %" PRIu32 ", "
643 p->ip_local_q, cp->ip, status);
/* TCP-specific filter, overrides the IP filter (priority 2). */
647 if (p->tcp_local_q != 0) {
648 int status = app_link_filter_tcp_add(p, cp);
650 APP_LOG(app, LOW, "%s (%" PRIu32 "): "
653 ", IP = 0x%08" PRIx32 ")",
654 p->name, p->pmd_id, p->tcp_local_q,
658 rte_panic("%s (%" PRIu32 "): "
660 "filter (queue = %" PRIu32 ", "
664 p->tcp_local_q, cp->ip, status);
/* UDP-specific filter. */
668 if (p->udp_local_q != 0) {
669 int status = app_link_filter_udp_add(p, cp);
671 APP_LOG(app, LOW, "%s (%" PRIu32 "): "
674 ", IP = 0x%08" PRIx32 ")",
675 p->name, p->pmd_id, p->udp_local_q,
679 rte_panic("%s (%" PRIu32 "): "
681 "filter (queue = %" PRIu32 ", "
685 p->udp_local_q, cp->ip, status);
/* SCTP-specific filter. */
689 if (p->sctp_local_q != 0) {
690 int status = app_link_filter_sctp_add(p, cp);
692 APP_LOG(app, LOW, "%s (%" PRIu32
693 "): Adding SCTP filter "
695 ", IP = 0x%08" PRIx32 ")",
696 p->name, p->pmd_id, p->sctp_local_q,
700 rte_panic("%s (%" PRIu32 "): "
702 "filter (queue = %" PRIu32 ", "
706 p->sctp_local_q, cp->ip,
/* Filters in place — now raise the physical link via the PMD. */
713 status = rte_eth_dev_set_link_up(cp->pmd_id);
715 rte_panic("%s (%" PRIu32 "): PMD set link up error %"
716 PRId32 "\n", cp->name, cp->pmd_id, status);
718 /* Mark link as UP */
/*
 * Bring link cp administratively DOWN: set the PMD link down first, then
 * remove the per-protocol ntuple filters that were steering cp's IP traffic
 * on every link (mirror of app_link_up_internal). Errors are fatal.
 */
723 app_link_down_internal(struct app_params *app, struct app_link_params *cp)
729 status = rte_eth_dev_set_link_down(cp->pmd_id);
731 rte_panic("%s (%" PRIu32 "): PMD set link down error %"
732 PRId32 "\n", cp->name, cp->pmd_id, status);
734 /* Mark link as DOWN */
737 /* Return if current link IP is not valid */
741 /* For each link, remove filters for IP of current link */
742 for (i = 0; i < app->n_links; i++) {
743 struct app_link_params *p = &app->link_params[i];
/* Queue id 0 = filter was never installed, nothing to delete. */
746 if (p->ip_local_q != 0) {
747 int status = app_link_filter_ip_del(p, cp);
749 APP_LOG(app, LOW, "%s (%" PRIu32
750 "): Deleting IP filter "
751 "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")",
752 p->name, p->pmd_id, p->ip_local_q, cp->ip);
755 rte_panic("%s (%" PRIu32
756 "): Error deleting IP filter "
760 p->name, p->pmd_id, p->ip_local_q,
765 if (p->tcp_local_q != 0) {
766 int status = app_link_filter_tcp_del(p, cp);
768 APP_LOG(app, LOW, "%s (%" PRIu32
769 "): Deleting TCP filter "
771 ", IP = 0x%" PRIx32 ")",
772 p->name, p->pmd_id, p->tcp_local_q, cp->ip);
775 rte_panic("%s (%" PRIu32
776 "): Error deleting TCP filter "
780 p->name, p->pmd_id, p->tcp_local_q,
785 if (p->udp_local_q != 0) {
786 int status = app_link_filter_udp_del(p, cp);
788 APP_LOG(app, LOW, "%s (%" PRIu32
789 "): Deleting UDP filter "
790 "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")",
791 p->name, p->pmd_id, p->udp_local_q, cp->ip);
794 rte_panic("%s (%" PRIu32
795 "): Error deleting UDP filter "
799 p->name, p->pmd_id, p->udp_local_q,
804 if (p->sctp_local_q != 0) {
805 int status = app_link_filter_sctp_del(p, cp);
807 APP_LOG(app, LOW, "%s (%" PRIu32
808 "): Deleting SCTP filter "
810 ", IP = 0x%" PRIx32 ")",
811 p->name, p->pmd_id, p->sctp_local_q, cp->ip);
814 rte_panic("%s (%" PRIu32
815 "): Error deleting SCTP filter "
819 p->name, p->pmd_id, p->sctp_local_q,
/*
 * Verify that every configured link reports link-status UP, logging each
 * link's speed/state; panics if any link is DOWN.
 */
826 app_check_link(struct app_params *app)
828 uint32_t all_links_up, i;
832 for (i = 0; i < app->n_links; i++) {
833 struct app_link_params *p = &app->link_params[i];
834 struct rte_eth_link link_params;
836 memset(&link_params, 0, sizeof(link_params));
837 rte_eth_link_get(p->pmd_id, &link_params);
/* link_speed is reported in Mbps; divide by 1000 to log Gbps. */
839 APP_LOG(app, HIGH, "%s (%" PRIu32 ") (%" PRIu32 " Gbps) %s",
842 link_params.link_speed / 1000,
843 link_params.link_status ? "UP" : "DOWN");
845 if (link_params.link_status == ETH_LINK_DOWN)
849 if (all_links_up == 0)
850 rte_panic("Some links are DOWN\n");
/*
 * Return non-zero (truthy) if any software queue has IPv4/IPv6
 * fragmentation or reassembly enabled; used to decide whether TX queues
 * must accept multi-segment mbufs.
 */
854 is_any_swq_frag_or_ras(struct app_params *app)
858 for (i = 0; i < app->n_pktq_swq; i++) {
859 struct app_pktq_swq_params *p = &app->swq_params[i];
861 if ((p->ipv4_frag == 1) || (p->ipv6_frag == 1) ||
862 (p->ipv4_ras == 1) || (p->ipv6_ras == 1))
/*
 * If any SWQ performs fragmentation/reassembly, clear the NOMULTSEGS flag
 * on every hardware TX queue so multi-segment mbufs can be transmitted.
 */
870 app_init_link_frag_ras(struct app_params *app)
874 if (is_any_swq_frag_or_ras(app)) {
875 for (i = 0; i < app->n_pktq_hwq_out; i++) {
876 struct app_pktq_hwq_out_params *p_txq = &app->hwq_out_params[i];
878 p_txq->conf.txq_flags &= ~ETH_TXQ_FLAGS_NOMULTSEGS;
/*
 * Return the NUMA socket of the given ethdev port, falling back to
 * socket 0 when the PMD reports SOCKET_ID_ANY.
 */
884 app_get_cpu_socket_id(uint32_t pmd_id)
886 int status = rte_eth_dev_socket_id(pmd_id);
888 return (status != SOCKET_ID_ANY) ? status : 0;
/* RSS is considered enabled for a link iff it has at least one RSS queue. */
892 app_link_rss_enabled(struct app_link_params *cp)
894 return (cp->n_rss_qs) ? 1 : 0;
/*
 * Program the NIC's RSS redirection table (RETA) for link cp, distributing
 * all RETA entries round-robin over the link's configured RSS queues.
 * Panics on zero/oversized RETA or on RETA update failure.
 */
898 app_link_rss_setup(struct app_link_params *cp)
900 struct rte_eth_dev_info dev_info;
901 struct rte_eth_rss_reta_entry64 reta_conf[APP_RETA_SIZE_MAX];
906 memset(&dev_info, 0, sizeof(dev_info));
907 rte_eth_dev_info_get(cp->pmd_id, &dev_info);
909 if (dev_info.reta_size == 0)
910 rte_panic("%s (%u): RSS setup error (null RETA size)\n",
911 cp->name, cp->pmd_id);
/* reta_conf[] only holds up to 512 entries (APP_RETA_SIZE_MAX groups). */
913 if (dev_info.reta_size > ETH_RSS_RETA_SIZE_512)
914 rte_panic("%s (%u): RSS setup error (RETA size too big)\n",
915 cp->name, cp->pmd_id);
917 /* Setup RETA contents */
918 memset(reta_conf, 0, sizeof(reta_conf));
/* Mark every entry of every touched 64-entry group as valid for update. */
920 for (i = 0; i < dev_info.reta_size; i++)
921 reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
923 for (i = 0; i < dev_info.reta_size; i++) {
924 uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
925 uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
/* Round-robin over the configured RSS queues; caller guarantees
 * n_rss_qs > 0 via app_link_rss_enabled(), avoiding div-by-zero. */
926 uint32_t rss_qs_pos = i % cp->n_rss_qs;
928 reta_conf[reta_id].reta[reta_pos] =
929 (uint16_t) cp->rss_qs[rss_qs_pos];
933 status = rte_eth_dev_rss_reta_update(cp->pmd_id,
937 rte_panic("%s (%u): RSS setup error (RETA update failed)\n",
938 cp->name, cp->pmd_id);
/*
 * Adjust the link's ethdev configuration for RSS: select RSS RX mode and
 * build the RSS hash-function set from the per-protocol config flags
 * (only rss_proto_ipv4 is visible in this excerpt; the guard enabling
 * this path is presumably app_link_rss_enabled() — TODO confirm).
 */
942 app_init_link_set_config(struct app_link_params *p)
945 p->conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
946 p->conf.rx_adv_conf.rss_conf.rss_hf = p->rss_proto_ipv4 |
/*
 * Full per-link ethdev bring-up: configure the device with the number of
 * RX/TX queues parsed from the config, set up each RXQ/TXQ that belongs to
 * the link (matched by "RXQn.m"/"TXQn.m" name against "LINKn"), start the
 * port, install ARP/TCP-SYN filters, program RSS if enabled, and finally
 * raise the link. Every failure is fatal. Ends with a global link check.
 */
953 app_init_link(struct app_params *app)
957 app_init_link_frag_ras(app);
959 for (i = 0; i < app->n_links; i++) {
960 struct app_link_params *p_link = &app->link_params[i];
961 uint32_t link_id, n_hwq_in, n_hwq_out, j;
/* Link index is parsed out of the section name "LINK<id>". */
964 sscanf(p_link->name, "LINK%" PRIu32, &link_id);
965 n_hwq_in = app_link_get_n_rxq(app, p_link);
966 n_hwq_out = app_link_get_n_txq(app, p_link);
967 app_init_link_set_config(p_link);
969 APP_LOG(app, HIGH, "Initializing %s (%" PRIu32") "
970 "(%" PRIu32 " RXQ, %" PRIu32 " TXQ) ...",
/* Device-level configuration (queue counts + p_link->conf). */
977 status = rte_eth_dev_configure(
983 rte_panic("%s (%" PRId32 "): "
984 "init error (%" PRId32 ")\n",
985 p_link->name, p_link->pmd_id, status);
987 rte_eth_macaddr_get(p_link->pmd_id,
988 (struct ether_addr *) &p_link->mac_addr);
/* Promiscuous mode guard condition not visible in this excerpt. */
991 rte_eth_promiscuous_enable(p_link->pmd_id);
994 for (j = 0; j < app->n_pktq_hwq_in; j++) {
995 struct app_pktq_hwq_in_params *p_rxq =
996 &app->hwq_in_params[j];
997 uint32_t rxq_link_id, rxq_queue_id;
999 sscanf(p_rxq->name, "RXQ%" PRIu32 ".%" PRIu32,
1000 &rxq_link_id, &rxq_queue_id);
/* Skip RX queues that belong to other links. */
1001 if (rxq_link_id != link_id)
1004 status = rte_eth_rx_queue_setup(
1008 app_get_cpu_socket_id(p_link->pmd_id),
1010 app->mempool[p_rxq->mempool_id]);
1012 rte_panic("%s (%" PRIu32 "): "
1013 "%s init error (%" PRId32 ")\n",
1021 for (j = 0; j < app->n_pktq_hwq_out; j++) {
1022 struct app_pktq_hwq_out_params *p_txq =
1023 &app->hwq_out_params[j];
1024 uint32_t txq_link_id, txq_queue_id;
1026 sscanf(p_txq->name, "TXQ%" PRIu32 ".%" PRIu32,
1027 &txq_link_id, &txq_queue_id);
/* Skip TX queues that belong to other links. */
1028 if (txq_link_id != link_id)
1031 status = rte_eth_tx_queue_setup(
1035 app_get_cpu_socket_id(p_link->pmd_id),
1038 rte_panic("%s (%" PRIu32 "): "
1039 "%s init error (%" PRId32 ")\n",
1047 status = rte_eth_dev_start(p_link->pmd_id);
1049 rte_panic("Cannot start %s (error %" PRId32 ")\n",
1050 p_link->name, status);
1053 app_link_set_arp_filter(app, p_link);
1054 app_link_set_tcp_syn_filter(app, p_link);
1055 if (app_link_rss_enabled(p_link))
1056 app_link_rss_setup(p_link);
1059 app_link_up_internal(app, p_link);
/* Verify all links report UP before proceeding. */
1062 app_check_link(app);
/*
 * Create one rte_ring per software queue. Single-reader/single-writer
 * queues get the SC_DEQ/SP_ENQ fast-path flags; multi-access queues use
 * the default MP/MC behavior. Panics on ring creation failure.
 */
1066 app_init_swq(struct app_params *app)
1070 for (i = 0; i < app->n_pktq_swq; i++) {
1071 struct app_pktq_swq_params *p = &app->swq_params[i];
1074 if (app_swq_get_readers(app, p) == 1)
1075 flags |= RING_F_SC_DEQ;
1076 if (app_swq_get_writers(app, p) == 1)
1077 flags |= RING_F_SP_ENQ;
1079 APP_LOG(app, HIGH, "Initializing %s...", p->name);
1080 app->swq[i] = rte_ring_create(
1086 if (app->swq[i] == NULL)
1087 rte_panic("%s init error\n", p->name);
/*
 * Create one rte_sched traffic-manager port per [TM] config section: the
 * scheduler rate is derived from the underlying link's reported speed
 * (Mbps -> bytes/sec), then every subport and each configured pipe
 * (profile != -1) is programmed. All failures are fatal.
 */
1092 app_init_tm(struct app_params *app)
1096 for (i = 0; i < app->n_pktq_tm; i++) {
1097 struct app_pktq_tm_params *p_tm = &app->tm_params[i];
1098 struct app_link_params *p_link;
1099 struct rte_eth_link link_eth_params;
1100 struct rte_sched_port *sched;
1101 uint32_t n_subports, subport_id;
1104 p_link = app_get_link_for_tm(app, p_tm);
1106 rte_eth_link_get(p_link->pmd_id, &link_eth_params);
1109 p_tm->sched_port_params.name = p_tm->name;
1110 p_tm->sched_port_params.socket =
1111 app_get_cpu_socket_id(p_link->pmd_id);
/* link_speed is Mbps: * 1e6 converts to bits/sec, / 8 to bytes/sec. */
1112 p_tm->sched_port_params.rate =
1113 (uint64_t) link_eth_params.link_speed * 1000 * 1000 / 8;
1115 APP_LOG(app, HIGH, "Initializing %s ...", p_tm->name);
1116 sched = rte_sched_port_config(&p_tm->sched_port_params);
1118 rte_panic("%s init error\n", p_tm->name);
1122 n_subports = p_tm->sched_port_params.n_subports_per_port;
1123 for (subport_id = 0; subport_id < n_subports; subport_id++) {
1124 uint32_t n_pipes_per_subport, pipe_id;
1126 status = rte_sched_subport_config(sched,
1128 &p_tm->sched_subport_params[subport_id]);
1130 rte_panic("%s subport %" PRIu32
1131 " init error (%" PRId32 ")\n",
1132 p_tm->name, subport_id, status);
1135 n_pipes_per_subport =
1136 p_tm->sched_port_params.n_pipes_per_subport;
1138 pipe_id < n_pipes_per_subport;
/* Pipe-to-profile table is flattened: row = subport, col = pipe. */
1140 int profile_id = p_tm->sched_pipe_to_profile[
1141 subport_id * APP_MAX_SCHED_PIPES +
/* -1 marks an unconfigured pipe — leave it unprogrammed. */
1144 if (profile_id == -1)
1147 status = rte_sched_pipe_config(sched,
1152 rte_panic("%s subport %" PRIu32
1154 " (profile %" PRId32 ") "
1155 "init error (% " PRId32 ")\n",
1156 p_tm->name, subport_id, pipe_id,
1157 profile_id, status);
/*
 * Create one kernel TAP interface per [TAP] config section by opening
 * /dev/net/tun (non-blocking) and issuing TUNSETIFF with IFF_TAP.
 * The resulting fd is presumably stored in app->tap[i] on a line not
 * visible here. Open/ioctl failures are fatal.
 */
1164 app_init_tap(struct app_params *app)
1168 for (i = 0; i < app->n_pktq_tap; i++) {
1169 struct app_pktq_tap_params *p_tap = &app->tap_params[i];
1173 APP_LOG(app, HIGH, "Initializing %s ...", p_tap->name);
1175 fd = open("/dev/net/tun", O_RDWR | O_NONBLOCK);
1177 rte_panic("Cannot open file /dev/net/tun\n");
1179 memset(&ifr, 0, sizeof(ifr));
1180 ifr.ifr_flags = IFF_TAP | IFF_NO_PI; /* No packet information */
/* Interface is named after the config section (e.g. "TAP0"). */
1181 snprintf(ifr.ifr_name, IFNAMSIZ, "%s", p_tap->name);
1183 status = ioctl(fd, TUNSETIFF, (void *) &ifr);
1185 rte_panic("TAP setup error\n");
1191 #ifdef RTE_LIBRTE_KNI
/*
 * KNI callback: kernel requested an interface up/down change — forward it
 * to the matching ethdev port after validating the port id.
 */
1193 kni_config_network_interface(uint8_t port_id, uint8_t if_up) {
1196 if (port_id >= rte_eth_dev_count())
/* Ternary on if_up (condition line not visible in this excerpt). */
1200 rte_eth_dev_set_link_up(port_id) :
1201 rte_eth_dev_set_link_down(port_id);
/*
 * KNI callback: kernel requested an MTU change — validate the port id and
 * reject MTUs above ETHER_MAX_LEN, then apply via rte_eth_dev_set_mtu().
 */
1207 kni_change_mtu(uint8_t port_id, unsigned new_mtu) {
1210 if (port_id >= rte_eth_dev_count())
1213 if (new_mtu > ETHER_MAX_LEN)
1217 ret = rte_eth_dev_set_mtu(port_id, new_mtu);
1223 #endif /* RTE_LIBRTE_KNI */
1225 #ifndef RTE_LIBRTE_KNI
/*
 * Stub used when librte_kni is compiled out: a config that requests KNI
 * queues is a hard error; a config with none is a no-op.
 */
1227 app_init_kni(struct app_params *app) {
1228 if (app->n_pktq_kni == 0)
1231 rte_panic("Can not init KNI without librte_kni support.\n");
/*
 * KNI-enabled build: allocate one KNI context per [KNI] config section.
 * Each context is bound to its backing link's port (group id, PCI address,
 * device id), sized from its mempool, optionally pinned to a kernel core,
 * and wired to the MTU / link-state callbacks above. Panics on failure.
 */
1235 app_init_kni(struct app_params *app) {
1238 if (app->n_pktq_kni == 0)
/* Pre-allocate kernel-side resources for all requested KNI devices. */
1241 rte_kni_init(app->n_pktq_kni);
1243 for (i = 0; i < app->n_pktq_kni; i++) {
1244 struct app_pktq_kni_params *p_kni = &app->kni_params[i];
1245 struct app_link_params *p_link;
1246 struct rte_eth_dev_info dev_info;
1247 struct app_mempool_params *mempool_params;
1248 struct rte_mempool *mempool;
1249 struct rte_kni_conf conf;
1250 struct rte_kni_ops ops;
1253 p_link = app_get_link_for_kni(app, p_kni);
1254 memset(&dev_info, 0, sizeof(dev_info));
1255 rte_eth_dev_info_get(p_link->pmd_id, &dev_info);
1258 mempool_params = &app->mempool_params[p_kni->mempool_id];
1259 mempool = app->mempool[p_kni->mempool_id];
1262 memset(&conf, 0, sizeof(conf));
1263 snprintf(conf.name, RTE_KNI_NAMESIZE, "%s", p_kni->name);
1264 conf.force_bind = p_kni->force_bind;
/* Optional: pin the KNI kernel thread to a specific lcore. */
1265 if (conf.force_bind) {
1268 lcore_id = cpu_core_map_get_lcore_id(app->core_map,
1271 p_kni->hyper_th_id);
1274 rte_panic("%s invalid CPU core\n", p_kni->name);
1276 conf.core_id = (uint32_t) lcore_id;
1278 conf.group_id = p_link->pmd_id;
1279 conf.mbuf_size = mempool_params->buffer_size;
/* Mirror the backing NIC's PCI identity into the KNI config. */
1280 conf.addr = dev_info.pci_dev->addr;
1281 conf.id = dev_info.pci_dev->id;
1283 memset(&ops, 0, sizeof(ops));
1284 ops.port_id = (uint8_t) p_link->pmd_id;
1285 ops.change_mtu = kni_change_mtu;
1286 ops.config_network_if = kni_config_network_interface;
1288 APP_LOG(app, HIGH, "Initializing %s ...", p_kni->name);
1289 app->kni[i] = rte_kni_alloc(mempool, &conf, &ops);
1291 rte_panic("%s init error\n", p_kni->name);
1294 #endif /* RTE_LIBRTE_KNI */
/*
 * Create one rte_ring per message queue. Message queues are always
 * single-producer/single-consumer (SP_ENQ | SC_DEQ). Panics on failure.
 */
1297 app_init_msgq(struct app_params *app)
1301 for (i = 0; i < app->n_msgq; i++) {
1302 struct app_msgq_params *p = &app->msgq_params[i];
1304 APP_LOG(app, HIGH, "Initializing %s ...", p->name);
1305 app->msgq[i] = rte_ring_create(
1309 RING_F_SP_ENQ | RING_F_SC_DEQ);
1311 if (app->msgq[i] == NULL)
1312 rte_panic("%s init error\n", p->name);
1316 void app_pipeline_params_get(struct app_params *app,
1317 struct app_pipeline_params *p_in,
1318 struct pipeline_params *p_out)
1322 snprintf(p_out->name, PIPELINE_NAME_SIZE, "%s", p_in->name);
1324 snprintf(p_out->type, PIPELINE_TYPE_SIZE, "%s", p_in->type);
1326 p_out->socket_id = (int) p_in->socket_id;
1328 p_out->log_level = app->log_level;
1331 p_out->n_ports_in = p_in->n_pktq_in;
1332 for (i = 0; i < p_in->n_pktq_in; i++) {
1333 struct app_pktq_in_params *in = &p_in->pktq_in[i];
1334 struct pipeline_port_in_params *out = &p_out->port_in[i];
1337 case APP_PKTQ_IN_HWQ:
1339 struct app_pktq_hwq_in_params *p_hwq_in =
1340 &app->hwq_in_params[in->id];
1341 struct app_link_params *p_link =
1342 app_get_link_for_rxq(app, p_hwq_in);
1343 uint32_t rxq_link_id, rxq_queue_id;
1345 sscanf(p_hwq_in->name, "RXQ%" SCNu32 ".%" SCNu32,
1349 out->type = PIPELINE_PORT_IN_ETHDEV_READER;
1350 out->params.ethdev.port_id = p_link->pmd_id;
1351 out->params.ethdev.queue_id = rxq_queue_id;
1352 out->burst_size = p_hwq_in->burst;
1355 case APP_PKTQ_IN_SWQ:
1357 struct app_pktq_swq_params *swq_params = &app->swq_params[in->id];
1359 if ((swq_params->ipv4_frag == 0) && (swq_params->ipv6_frag == 0)) {
1360 if (app_swq_get_readers(app, swq_params) == 1) {
1361 out->type = PIPELINE_PORT_IN_RING_READER;
1362 out->params.ring.ring = app->swq[in->id];
1363 out->burst_size = app->swq_params[in->id].burst_read;
1365 out->type = PIPELINE_PORT_IN_RING_MULTI_READER;
1366 out->params.ring_multi.ring = app->swq[in->id];
1367 out->burst_size = swq_params->burst_read;
1370 if (swq_params->ipv4_frag == 1) {
1371 struct rte_port_ring_reader_ipv4_frag_params *params =
1372 &out->params.ring_ipv4_frag;
1374 out->type = PIPELINE_PORT_IN_RING_READER_IPV4_FRAG;
1375 params->ring = app->swq[in->id];
1376 params->mtu = swq_params->mtu;
1377 params->metadata_size = swq_params->metadata_size;
1378 params->pool_direct =
1379 app->mempool[swq_params->mempool_direct_id];
1380 params->pool_indirect =
1381 app->mempool[swq_params->mempool_indirect_id];
1382 out->burst_size = swq_params->burst_read;
1384 struct rte_port_ring_reader_ipv6_frag_params *params =
1385 &out->params.ring_ipv6_frag;
1387 out->type = PIPELINE_PORT_IN_RING_READER_IPV6_FRAG;
1388 params->ring = app->swq[in->id];
1389 params->mtu = swq_params->mtu;
1390 params->metadata_size = swq_params->metadata_size;
1391 params->pool_direct =
1392 app->mempool[swq_params->mempool_direct_id];
1393 params->pool_indirect =
1394 app->mempool[swq_params->mempool_indirect_id];
1395 out->burst_size = swq_params->burst_read;
1400 case APP_PKTQ_IN_TM:
1402 out->type = PIPELINE_PORT_IN_SCHED_READER;
1403 out->params.sched.sched = app->tm[in->id];
1404 out->burst_size = app->tm_params[in->id].burst_read;
1407 case APP_PKTQ_IN_TAP:
1409 struct app_pktq_tap_params *tap_params =
1410 &app->tap_params[in->id];
1411 struct app_mempool_params *mempool_params =
1412 &app->mempool_params[tap_params->mempool_id];
1413 struct rte_mempool *mempool =
1414 app->mempool[tap_params->mempool_id];
1416 out->type = PIPELINE_PORT_IN_FD_READER;
1417 out->params.fd.fd = app->tap[in->id];
1418 out->params.fd.mtu = mempool_params->buffer_size;
1419 out->params.fd.mempool = mempool;
1420 out->burst_size = app->tap_params[in->id].burst_read;
1423 #ifdef RTE_LIBRTE_KNI
1424 case APP_PKTQ_IN_KNI:
1426 out->type = PIPELINE_PORT_IN_KNI_READER;
1427 out->params.kni.kni = app->kni[in->id];
1428 out->burst_size = app->kni_params[in->id].burst_read;
1431 #endif /* RTE_LIBRTE_KNI */
1432 case APP_PKTQ_IN_SOURCE:
1434 uint32_t mempool_id =
1435 app->source_params[in->id].mempool_id;
1437 out->type = PIPELINE_PORT_IN_SOURCE;
1438 out->params.source.mempool = app->mempool[mempool_id];
1439 out->burst_size = app->source_params[in->id].burst;
1440 out->params.source.file_name =
1441 app->source_params[in->id].file_name;
1442 out->params.source.n_bytes_per_pkt =
1443 app->source_params[in->id].n_bytes_per_pkt;
1452 p_out->n_ports_out = p_in->n_pktq_out;
1453 for (i = 0; i < p_in->n_pktq_out; i++) {
1454 struct app_pktq_out_params *in = &p_in->pktq_out[i];
1455 struct pipeline_port_out_params *out = &p_out->port_out[i];
1458 case APP_PKTQ_OUT_HWQ:
1460 struct app_pktq_hwq_out_params *p_hwq_out =
1461 &app->hwq_out_params[in->id];
1462 struct app_link_params *p_link =
1463 app_get_link_for_txq(app, p_hwq_out);
1464 uint32_t txq_link_id, txq_queue_id;
1466 sscanf(p_hwq_out->name,
1467 "TXQ%" SCNu32 ".%" SCNu32,
1471 if (p_hwq_out->dropless == 0) {
1472 struct rte_port_ethdev_writer_params *params =
1473 &out->params.ethdev;
1475 out->type = PIPELINE_PORT_OUT_ETHDEV_WRITER;
1476 params->port_id = p_link->pmd_id;
1477 params->queue_id = txq_queue_id;
1478 params->tx_burst_sz =
1479 app->hwq_out_params[in->id].burst;
1481 struct rte_port_ethdev_writer_nodrop_params
1482 *params = &out->params.ethdev_nodrop;
1485 PIPELINE_PORT_OUT_ETHDEV_WRITER_NODROP;
1486 params->port_id = p_link->pmd_id;
1487 params->queue_id = txq_queue_id;
1488 params->tx_burst_sz = p_hwq_out->burst;
1489 params->n_retries = p_hwq_out->n_retries;
1493 case APP_PKTQ_OUT_SWQ:
1495 struct app_pktq_swq_params *swq_params = &app->swq_params[in->id];
1497 if ((swq_params->ipv4_ras == 0) && (swq_params->ipv6_ras == 0)) {
1498 if (app_swq_get_writers(app, swq_params) == 1) {
1499 if (app->swq_params[in->id].dropless == 0) {
1500 struct rte_port_ring_writer_params *params =
1503 out->type = PIPELINE_PORT_OUT_RING_WRITER;
1504 params->ring = app->swq[in->id];
1505 params->tx_burst_sz =
1506 app->swq_params[in->id].burst_write;
1508 struct rte_port_ring_writer_nodrop_params
1509 *params = &out->params.ring_nodrop;
1512 PIPELINE_PORT_OUT_RING_WRITER_NODROP;
1513 params->ring = app->swq[in->id];
1514 params->tx_burst_sz =
1515 app->swq_params[in->id].burst_write;
1517 app->swq_params[in->id].n_retries;
1520 if (swq_params->dropless == 0) {
1521 struct rte_port_ring_multi_writer_params *params =
1522 &out->params.ring_multi;
1524 out->type = PIPELINE_PORT_OUT_RING_MULTI_WRITER;
1525 params->ring = app->swq[in->id];
1526 params->tx_burst_sz = swq_params->burst_write;
1528 struct rte_port_ring_multi_writer_nodrop_params
1529 *params = &out->params.ring_multi_nodrop;
1531 out->type = PIPELINE_PORT_OUT_RING_MULTI_WRITER_NODROP;
1532 params->ring = app->swq[in->id];
1533 params->tx_burst_sz = swq_params->burst_write;
1534 params->n_retries = swq_params->n_retries;
1538 if (swq_params->ipv4_ras == 1) {
1539 struct rte_port_ring_writer_ipv4_ras_params *params =
1540 &out->params.ring_ipv4_ras;
1542 out->type = PIPELINE_PORT_OUT_RING_WRITER_IPV4_RAS;
1543 params->ring = app->swq[in->id];
1544 params->tx_burst_sz = swq_params->burst_write;
1546 struct rte_port_ring_writer_ipv6_ras_params *params =
1547 &out->params.ring_ipv6_ras;
1549 out->type = PIPELINE_PORT_OUT_RING_WRITER_IPV6_RAS;
1550 params->ring = app->swq[in->id];
1551 params->tx_burst_sz = swq_params->burst_write;
1556 case APP_PKTQ_OUT_TM:
1558 struct rte_port_sched_writer_params *params =
1561 out->type = PIPELINE_PORT_OUT_SCHED_WRITER;
1562 params->sched = app->tm[in->id];
1563 params->tx_burst_sz =
1564 app->tm_params[in->id].burst_write;
1567 case APP_PKTQ_OUT_TAP:
1569 struct rte_port_fd_writer_params *params =
1572 out->type = PIPELINE_PORT_OUT_FD_WRITER;
1573 params->fd = app->tap[in->id];
1574 params->tx_burst_sz =
1575 app->tap_params[in->id].burst_write;
1578 #ifdef RTE_LIBRTE_KNI
1579 case APP_PKTQ_OUT_KNI:
1581 struct app_pktq_kni_params *p_kni =
1582 &app->kni_params[in->id];
1584 if (p_kni->dropless == 0) {
1585 struct rte_port_kni_writer_params *params =
1588 out->type = PIPELINE_PORT_OUT_KNI_WRITER;
1589 params->kni = app->kni[in->id];
1590 params->tx_burst_sz =
1591 app->kni_params[in->id].burst_write;
1593 struct rte_port_kni_writer_nodrop_params
1594 *params = &out->params.kni_nodrop;
1596 out->type = PIPELINE_PORT_OUT_KNI_WRITER_NODROP;
1597 params->kni = app->kni[in->id];
1598 params->tx_burst_sz =
1599 app->kni_params[in->id].burst_write;
1601 app->kni_params[in->id].n_retries;
1605 #endif /* RTE_LIBRTE_KNI */
1606 case APP_PKTQ_OUT_SINK:
1608 out->type = PIPELINE_PORT_OUT_SINK;
1609 out->params.sink.file_name =
1610 app->sink_params[in->id].file_name;
1611 out->params.sink.max_n_pkts =
1612 app->sink_params[in->id].
1623 p_out->n_msgq = p_in->n_msgq_in;
1625 for (i = 0; i < p_in->n_msgq_in; i++)
1626 p_out->msgq_in[i] = app->msgq[p_in->msgq_in[i]];
1628 for (i = 0; i < p_in->n_msgq_out; i++)
1629 p_out->msgq_out[i] = app->msgq[p_in->msgq_out[i]];
1632 p_out->n_args = p_in->n_args;
1633 for (i = 0; i < p_in->n_args; i++) {
1634 p_out->args_name[i] = p_in->args_name[i];
1635 p_out->args_value[i] = p_in->args_value[i];
/*
 * Initialize every pipeline declared in the application configuration.
 * For each pipeline: resolve its registered type by name, flatten the
 * configuration into a struct pipeline_params, then invoke the type's
 * back-end and front-end f_init hooks.  Any failure is fatal (rte_panic).
 * NOTE(review): the leading numbers on each line are original-file line
 * numbers fused into this excerpt; several lines are elided -- verify
 * against the complete source file.
 */
1640 app_init_pipelines(struct app_params *app)
1644 for (p_id = 0; p_id < app->n_pipelines; p_id++) {
1645 struct app_pipeline_params *params =
1646 &app->pipeline_params[p_id];
1647 struct app_pipeline_data *data = &app->pipeline_data[p_id];
1648 struct pipeline_type *ptype;
1649 struct pipeline_params pp;
1651 APP_LOG(app, HIGH, "Initializing %s ...", params->name);
/* Unknown pipeline type name in the config is a fatal error. */
1653 ptype = app_pipeline_type_find(app, params->type);
1655 rte_panic("Init error: Unknown pipeline type \"%s\"\n",
1658 app_pipeline_params_get(app, params, &pp);
/* Back-end (data-plane) instance; NULL return from f_init is fatal. */
1662 if (ptype->be_ops->f_init) {
1663 data->be = ptype->be_ops->f_init(&pp, (void *) app);
1665 if (data->be == NULL)
1666 rte_panic("Pipeline instance \"%s\" back-end "
1667 "init error\n", params->name);
/* Front-end (CLI/management) instance; optional, but failure is fatal. */
1672 if (ptype->fe_ops->f_init) {
1673 data->fe = ptype->fe_ops->f_init(&pp, (void *) app);
1675 if (data->fe == NULL)
1676 rte_panic("Pipeline instance \"%s\" front-end "
1677 "init error\n", params->name);
1680 data->ptype = ptype;
/* Convert the configured timer period (units of 10 ms, presumably --
 * TODO confirm the /100 scaling against the config parser) to TSC cycles. */
1682 data->timer_period = (rte_get_tsc_hz() *
1683 params->timer_period) / 100;
/*
 * Run the optional front-end f_post_init hook of every pipeline, after
 * all pipelines have been initialized.  Pipelines whose type has no
 * f_post_init are skipped; a failing hook is fatal (rte_panic).
 */
1688 app_post_init_pipelines(struct app_params *app)
1692 for (p_id = 0; p_id < app->n_pipelines; p_id++) {
1693 struct app_pipeline_params *params =
1694 &app->pipeline_params[p_id];
1695 struct app_pipeline_data *data = &app->pipeline_data[p_id];
/* No post-init hook registered for this type: nothing to do. */
1698 if (data->ptype->fe_ops->f_post_init == NULL)
1701 status = data->ptype->fe_ops->f_post_init(data->fe);
1703 rte_panic("Pipeline instance \"%s\" front-end "
1704 "post-init error\n", params->name);
/*
 * Distribute the initialized pipelines onto their worker threads.
 * For each pipeline: map its (socket, core, hyper-thread) triple to an
 * lcore id, attach the per-thread request/response message queues, and
 * append the pipeline to the thread's "regular" list (type has no f_run,
 * i.e. it is driven by the generic run loop) or "custom" list (type
 * provides its own f_run).  Thread timer deadlines are seeded from the
 * current TSC.  All lookup failures are fatal (rte_panic).
 */
1709 app_init_threads(struct app_params *app)
1711 uint64_t time = rte_get_tsc_cycles();
1714 for (p_id = 0; p_id < app->n_pipelines; p_id++) {
1715 struct app_pipeline_params *params =
1716 &app->pipeline_params[p_id];
1717 struct app_pipeline_data *data = &app->pipeline_data[p_id];
1718 struct pipeline_type *ptype;
1719 struct app_thread_data *t;
1720 struct app_thread_pipeline_data *p;
/* Translate the configured socket/core/HT placement into an lcore id. */
1723 lcore_id = cpu_core_map_get_lcore_id(app->core_map,
1726 params->hyper_th_id);
1729 rte_panic("Invalid core s%" PRIu32 "c%" PRIu32 "%s\n",
1732 (params->hyper_th_id) ? "h" : "");
1734 t = &app->thread_data[lcore_id];
/* APP_THREAD_TIMER_PERIOD is in milliseconds, hence the /1000 to cycles. */
1736 t->timer_period = (rte_get_tsc_hz() * APP_THREAD_TIMER_PERIOD) / 1000;
1737 t->thread_req_deadline = time + t->timer_period;
/* Reset headroom accounting for this thread. */
1739 t->headroom_cycles = 0;
1740 t->headroom_time = rte_get_tsc_cycles();
1741 t->headroom_ratio = 0.0;
/* Per-thread control-plane message queues; both must exist. */
1743 t->msgq_in = app_thread_msgq_in_get(app,
1746 params->hyper_th_id);
1747 if (t->msgq_in == NULL)
1748 rte_panic("Init error: Cannot find MSGQ_IN for thread %" PRId32,
1751 t->msgq_out = app_thread_msgq_out_get(app,
1754 params->hyper_th_id);
1755 if (t->msgq_out == NULL)
1756 rte_panic("Init error: Cannot find MSGQ_OUT for thread %" PRId32,
1759 ptype = app_pipeline_type_find(app, params->type);
1761 rte_panic("Init error: Unknown pipeline "
1762 "type \"%s\"\n", params->type);
/* No custom f_run => generic run loop ("regular"); else "custom". */
1764 p = (ptype->be_ops->f_run == NULL) ?
1765 &t->regular[t->n_regular] :
1766 &t->custom[t->n_custom];
1768 p->pipeline_id = p_id;
1770 p->f_run = ptype->be_ops->f_run;
1771 p->f_timer = ptype->be_ops->f_timer;
1772 p->timer_period = data->timer_period;
1773 p->deadline = time + data->timer_period;
/* Bump the matching per-thread counter for the list chosen above. */
1777 if (ptype->be_ops->f_run == NULL)
/*
 * Top-level application init: build the core map/mask and mempools,
 * install the common and thread CLI commands, register the built-in
 * pipeline types, then instantiate all pipelines and bind them to
 * their worker threads.
 * NOTE(review): this excerpt elides several init calls between line
 * 1790 and 1798 of the original file -- verify ordering there.
 */
1784 int app_init(struct app_params *app)
1786 app_init_core_map(app);
1787 app_init_core_mask(app);
1790 app_init_mempool(app);
/* CLI command sets available before any pipeline type is registered. */
1798 app_pipeline_common_cmd_push(app);
1799 app_pipeline_thread_cmd_push(app);
/* Built-in pipeline types; master must be present for the CLI thread. */
1800 app_pipeline_type_register(app, &pipeline_master);
1801 app_pipeline_type_register(app, &pipeline_passthrough);
1802 app_pipeline_type_register(app, &pipeline_flow_classification);
1803 app_pipeline_type_register(app, &pipeline_flow_actions);
1804 app_pipeline_type_register(app, &pipeline_firewall);
1805 app_pipeline_type_register(app, &pipeline_routing);
1807 app_init_pipelines(app);
1808 app_init_threads(app);
/*
 * Post-initialization pass: runs each pipeline type's front-end
 * post-init hook (see app_post_init_pipelines).
 */
1813 int app_post_init(struct app_params *app)
1815 app_post_init_pipelines(app);
/*
 * Append a pipeline type's CLI command table to the application-wide
 * command array.  Validates arguments, checks that the array has room
 * (APP_MAX_CMDS), copies the commands, points each command's user data
 * at the app context, and keeps the array NULL-terminated for cmdline.
 */
1821 app_pipeline_type_cmd_push(struct app_params *app,
1822 struct pipeline_type *ptype)
1824 cmdline_parse_ctx_t *cmds;
1827 /* Check input arguments */
1828 if ((app == NULL) ||
1832 n_cmds = pipeline_type_cmds_count(ptype);
1836 cmds = ptype->fe_ops->cmds;
1838 /* Check for available slots in the application commands array */
1839 if (n_cmds > APP_MAX_CMDS - app->n_cmds)
1842 /* Push pipeline commands into the application */
1843 memcpy(&app->cmds[app->n_cmds],
1845 n_cmds * sizeof(cmdline_parse_ctx_t));
/* Each copied command carries the app pointer as its callback data. */
1847 for (i = 0; i < n_cmds; i++)
1848 app->cmds[app->n_cmds + i]->data = app;
/* Advance the count and re-terminate the array with NULL. */
1850 app->n_cmds += n_cmds;
1851 app->cmds[app->n_cmds] = NULL;
/*
 * Register a pipeline type with the application.  Rejects invalid
 * arguments (missing name, missing mandatory f_init/f_timer back-end
 * ops), duplicate type names, and registration beyond the capacity of
 * the type table (APP_MAX_PIPELINE_TYPES) or the CLI command array
 * (APP_MAX_CMDS).  On success the type descriptor is copied into the
 * table and its CLI commands are pushed via app_pipeline_type_cmd_push.
 */
1857 app_pipeline_type_register(struct app_params *app, struct pipeline_type *ptype)
1861 /* Check input arguments */
1862 if ((app == NULL) ||
1864 (ptype->name == NULL) ||
1865 (strlen(ptype->name) == 0) ||
1866 (ptype->be_ops->f_init == NULL) ||
1867 (ptype->be_ops->f_timer == NULL))
1870 /* Check for duplicate entry */
1871 for (i = 0; i < app->n_pipeline_types; i++)
1872 if (strcmp(app->pipeline_type[i].name, ptype->name) == 0)
1875 /* Check for resource availability */
1876 n_cmds = pipeline_type_cmds_count(ptype);
1877 if ((app->n_pipeline_types == APP_MAX_PIPELINE_TYPES) ||
1878 (n_cmds > APP_MAX_CMDS - app->n_cmds))
1881 /* Copy pipeline type */
1882 memcpy(&app->pipeline_type[app->n_pipeline_types++],
1884 sizeof(struct pipeline_type));
1886 /* Copy CLI commands */
1888 app_pipeline_type_cmd_push(app, ptype);
1894 pipeline_type *app_pipeline_type_find(struct app_params *app, char *name)
1898 for (i = 0; i < app->n_pipeline_types; i++)
1899 if (strcmp(app->pipeline_type[i].name, name) == 0)
1900 return &app->pipeline_type[i];