1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
8 #include <netinet/in.h>
9 #ifdef RTE_EXEC_ENV_LINUXAPP
11 #include <linux/if_tun.h>
14 #include <sys/ioctl.h>
17 #include <rte_cycles.h>
18 #include <rte_ethdev.h>
19 #include <rte_ether.h>
22 #include <rte_malloc.h>
23 #include <rte_bus_pci.h>
27 #include "pipeline_common_fe.h"
28 #include "pipeline_master.h"
29 #include "pipeline_firewall.h"
30 #include "pipeline_flow_classification.h"
31 #include "pipeline_flow_actions.h"
32 #include "thread_fe.h"
34 #define APP_NAME_SIZE 32
36 #define APP_RETA_SIZE_MAX (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
/* Build the application's CPU core map (NUMA sockets x lcores), used later to
 * translate (socket, core, hyper-thread) tuples into lcore ids. Panics when
 * the map cannot be created; dumps it at low log verbosity. */
39 app_init_core_map(struct app_params *app)
41 APP_LOG(app, HIGH, "Initializing CPU core map ...");
42 app->core_map = cpu_core_map_init(RTE_MAX_NUMA_NODES, RTE_MAX_LCORE,
45 if (app->core_map == NULL)
46 rte_panic("Cannot create CPU core map\n");
48 if (app->log_level >= APP_LOG_LEVEL_LOW)
49 cpu_core_map_print(app->core_map);
52 /* Core Mask String in Hex Representation */
53 #define APP_CORE_MASK_STRING_SIZE ((64 * APP_CORE_MASK_SIZE) / 8 * 2 + 1)
/* Enable in the application core mask the lcore mapped to every pipeline,
 * then log the resulting mask as a hex string. Panics when a pipeline maps
 * to an invalid lcore. */
56 app_init_core_mask(struct app_params *app)
59 char core_mask_str[APP_CORE_MASK_STRING_SIZE];
61 for (i = 0; i < app->n_pipelines; i++) {
62 struct app_pipeline_params *p = &app->pipeline_params[i];
65 lcore_id = cpu_core_map_get_lcore_id(app->core_map,
71 rte_panic("Cannot create CPU core mask\n");
73 app_core_enable_in_core_mask(app, lcore_id);
76 app_core_build_core_mask_string(app, core_mask_str);
77 APP_LOG(app, HIGH, "CPU core mask = 0x%s", core_mask_str);
/* Translate the parsed application EAL parameters into an argv[] vector and
 * call rte_eal_init() with it. Every argument string is strdup()'d into
 * app->eal_argv; the final count is stored in app->eal_argc. Panics when
 * EAL initialization fails.
 * NOTE(review): not every original line is visible in this view; comments
 * below describe only the visible logic. */
81 app_init_eal(struct app_params *app)
84 char core_mask_str[APP_CORE_MASK_STRING_SIZE];
85 struct app_eal_params *p = &app->eal_params;
/* argv[0] is the application name, by exec convention */
90 app->eal_argv[n_args++] = strdup(app->app_name);
/* -c: hex core mask derived from the pipeline-to-core assignment */
92 app_core_build_core_mask_string(app, core_mask_str);
93 snprintf(buffer, sizeof(buffer), "-c%s", core_mask_str);
94 app->eal_argv[n_args++] = strdup(buffer);
97 snprintf(buffer, sizeof(buffer), "--lcores=%s", p->coremap);
98 app->eal_argv[n_args++] = strdup(buffer);
101 if (p->master_lcore_present) {
104 "--master-lcore=%" PRIu32,
106 app->eal_argv[n_args++] = strdup(buffer);
/* -n: number of memory channels */
109 snprintf(buffer, sizeof(buffer), "-n%" PRIu32, p->channels);
110 app->eal_argv[n_args++] = strdup(buffer);
112 if (p->memory_present) {
113 snprintf(buffer, sizeof(buffer), "-m%" PRIu32, p->memory);
114 app->eal_argv[n_args++] = strdup(buffer);
117 if (p->ranks_present) {
118 snprintf(buffer, sizeof(buffer), "-r%" PRIu32, p->ranks);
119 app->eal_argv[n_args++] = strdup(buffer);
/* One --pci-blacklist option per configured blacklisted device */
122 for (i = 0; i < APP_MAX_LINKS; i++) {
123 if (p->pci_blacklist[i] == NULL)
128 "--pci-blacklist=%s",
129 p->pci_blacklist[i]);
130 app->eal_argv[n_args++] = strdup(buffer);
/* Explicit PCI whitelist entries apply only when a port mask is set */
133 if (app->port_mask != 0)
134 for (i = 0; i < APP_MAX_LINKS; i++) {
135 if (p->pci_whitelist[i] == NULL)
140 "--pci-whitelist=%s",
141 p->pci_whitelist[i]);
142 app->eal_argv[n_args++] = strdup(buffer);
/* Whitelist the PCI device (BDF) of every configured link */
145 for (i = 0; i < app->n_links; i++) {
146 char *pci_bdf = app->link_params[i].pci_bdf;
150 "--pci-whitelist=%s",
152 app->eal_argv[n_args++] = strdup(buffer);
/* Virtual devices (--vdev) */
155 for (i = 0; i < APP_MAX_LINKS; i++) {
156 if (p->vdev[i] == NULL)
163 app->eal_argv[n_args++] = strdup(buffer);
166 if ((p->vmware_tsc_map_present) && p->vmware_tsc_map) {
167 snprintf(buffer, sizeof(buffer), "--vmware-tsc-map");
168 app->eal_argv[n_args++] = strdup(buffer);
176 app->eal_argv[n_args++] = strdup(buffer);
180 snprintf(buffer, sizeof(buffer), "--syslog=%s", p->syslog);
181 app->eal_argv[n_args++] = strdup(buffer);
184 if (p->log_level_present) {
187 "--log-level=%" PRIu32,
189 app->eal_argv[n_args++] = strdup(buffer);
192 if ((p->version_present) && p->version) {
193 snprintf(buffer, sizeof(buffer), "-v");
194 app->eal_argv[n_args++] = strdup(buffer);
197 if ((p->help_present) && p->help) {
198 snprintf(buffer, sizeof(buffer), "--help");
199 app->eal_argv[n_args++] = strdup(buffer);
202 if ((p->no_huge_present) && p->no_huge) {
203 snprintf(buffer, sizeof(buffer), "--no-huge");
204 app->eal_argv[n_args++] = strdup(buffer);
207 if ((p->no_pci_present) && p->no_pci) {
208 snprintf(buffer, sizeof(buffer), "--no-pci");
209 app->eal_argv[n_args++] = strdup(buffer);
212 if ((p->no_hpet_present) && p->no_hpet) {
213 snprintf(buffer, sizeof(buffer), "--no-hpet");
214 app->eal_argv[n_args++] = strdup(buffer);
217 if ((p->no_shconf_present) && p->no_shconf) {
218 snprintf(buffer, sizeof(buffer), "--no-shconf");
219 app->eal_argv[n_args++] = strdup(buffer);
223 snprintf(buffer, sizeof(buffer), "-d%s", p->add_driver);
224 app->eal_argv[n_args++] = strdup(buffer);
232 app->eal_argv[n_args++] = strdup(buffer);
236 snprintf(buffer, sizeof(buffer), "--huge-dir=%s", p->huge_dir);
237 app->eal_argv[n_args++] = strdup(buffer);
240 if (p->file_prefix) {
245 app->eal_argv[n_args++] = strdup(buffer);
248 if (p->base_virtaddr) {
251 "--base-virtaddr=%s",
253 app->eal_argv[n_args++] = strdup(buffer);
256 if ((p->create_uio_dev_present) && p->create_uio_dev) {
257 snprintf(buffer, sizeof(buffer), "--create-uio-dev");
258 app->eal_argv[n_args++] = strdup(buffer);
266 app->eal_argv[n_args++] = strdup(buffer);
/* Terminating "--" separates EAL arguments from application arguments */
269 snprintf(buffer, sizeof(buffer), "--");
270 app->eal_argv[n_args++] = strdup(buffer);
272 app->eal_argc = n_args;
274 APP_LOG(app, HIGH, "Initializing EAL ...");
/* At low log verbosity, echo the full EAL argument list (skipping argv[0]) */
275 if (app->log_level >= APP_LOG_LEVEL_LOW) {
278 fprintf(stdout, "[APP] EAL arguments: \"");
279 for (i = 1; i < app->eal_argc; i++)
280 fprintf(stdout, "%s ", app->eal_argv[i]);
281 fprintf(stdout, "\"\n");
284 status = rte_eal_init(app->eal_argc, app->eal_argv);
286 rte_panic("EAL init error\n");
/* Create one rte_pktmbuf pool per configured mempool descriptor; panic when
 * any pool cannot be allocated. */
290 app_init_mempool(struct app_params *app)
294 for (i = 0; i < app->n_mempools; i++) {
295 struct app_mempool_params *p = &app->mempool_params[i];
297 APP_LOG(app, HIGH, "Initializing %s ...", p->name);
298 app->mempool[i] = rte_pktmbuf_pool_create(
304 sizeof(struct rte_mbuf), /* mbuf data size */
307 if (app->mempool[i] == NULL)
308 rte_panic("%s init error\n", p->name);
/* Install an ethertype filter steering ARP frames received on this link to
 * its dedicated ARP RX queue (link->arp_q). Returns the filter_ctrl status. */
313 app_link_filter_arp_add(struct app_link_params *link)
315 struct rte_eth_ethertype_filter filter = {
316 .ether_type = ETHER_TYPE_ARP,
318 .queue = link->arp_q,
321 return rte_eth_dev_filter_ctrl(link->pmd_id,
322 RTE_ETH_FILTER_ETHERTYPE,
/* Install a SYN filter steering TCP SYN packets received on this link to its
 * dedicated TCP SYN RX queue (link->tcp_syn_q). Returns the filter_ctrl
 * status. */
328 app_link_filter_tcp_syn_add(struct app_link_params *link)
330 struct rte_eth_syn_filter filter = {
332 .queue = link->tcp_syn_q,
335 return rte_eth_dev_filter_ctrl(link->pmd_id,
/* Add a 5-tuple filter on link l1 matching only destination IP == l2->ip
 * (all other tuple fields masked out), steering hits to l1's local IP queue.
 * Priority 1 = lowest, so protocol-specific filters can override it. */
342 app_link_filter_ip_add(struct app_link_params *l1, struct app_link_params *l2)
344 struct rte_eth_ntuple_filter filter = {
345 .flags = RTE_5TUPLE_FLAGS,
346 .dst_ip = rte_bswap32(l2->ip),
347 .dst_ip_mask = UINT32_MAX, /* Enable */
349 .src_ip_mask = 0, /* Disable */
351 .dst_port_mask = 0, /* Disable */
353 .src_port_mask = 0, /* Disable */
355 .proto_mask = 0, /* Disable */
357 .priority = 1, /* Lowest */
358 .queue = l1->ip_local_q,
361 return rte_eth_dev_filter_ctrl(l1->pmd_id,
362 RTE_ETH_FILTER_NTUPLE,
/* Delete the destination-IP 5-tuple filter previously installed by
 * app_link_filter_ip_add(); the filter spec must match the add exactly. */
368 app_link_filter_ip_del(struct app_link_params *l1, struct app_link_params *l2)
370 struct rte_eth_ntuple_filter filter = {
371 .flags = RTE_5TUPLE_FLAGS,
372 .dst_ip = rte_bswap32(l2->ip),
373 .dst_ip_mask = UINT32_MAX, /* Enable */
375 .src_ip_mask = 0, /* Disable */
377 .dst_port_mask = 0, /* Disable */
379 .src_port_mask = 0, /* Disable */
381 .proto_mask = 0, /* Disable */
383 .priority = 1, /* Lowest */
384 .queue = l1->ip_local_q,
387 return rte_eth_dev_filter_ctrl(l1->pmd_id,
388 RTE_ETH_FILTER_NTUPLE,
389 RTE_ETH_FILTER_DELETE,
/* Add a 5-tuple filter on l1 matching dst IP == l2->ip AND proto == TCP,
 * steering hits to l1's local TCP queue. Priority 2 beats the plain IP
 * filter for TCP traffic. */
394 app_link_filter_tcp_add(struct app_link_params *l1, struct app_link_params *l2)
396 struct rte_eth_ntuple_filter filter = {
397 .flags = RTE_5TUPLE_FLAGS,
398 .dst_ip = rte_bswap32(l2->ip),
399 .dst_ip_mask = UINT32_MAX, /* Enable */
401 .src_ip_mask = 0, /* Disable */
403 .dst_port_mask = 0, /* Disable */
405 .src_port_mask = 0, /* Disable */
406 .proto = IPPROTO_TCP,
407 .proto_mask = UINT8_MAX, /* Enable */
409 .priority = 2, /* Higher priority than IP */
410 .queue = l1->tcp_local_q,
413 return rte_eth_dev_filter_ctrl(l1->pmd_id,
414 RTE_ETH_FILTER_NTUPLE,
/* Delete the TCP 5-tuple filter previously installed by
 * app_link_filter_tcp_add(); the filter spec must match the add exactly. */
420 app_link_filter_tcp_del(struct app_link_params *l1, struct app_link_params *l2)
422 struct rte_eth_ntuple_filter filter = {
423 .flags = RTE_5TUPLE_FLAGS,
424 .dst_ip = rte_bswap32(l2->ip),
425 .dst_ip_mask = UINT32_MAX, /* Enable */
427 .src_ip_mask = 0, /* Disable */
429 .dst_port_mask = 0, /* Disable */
431 .src_port_mask = 0, /* Disable */
432 .proto = IPPROTO_TCP,
433 .proto_mask = UINT8_MAX, /* Enable */
435 .priority = 2, /* Higher priority than IP */
436 .queue = l1->tcp_local_q,
439 return rte_eth_dev_filter_ctrl(l1->pmd_id,
440 RTE_ETH_FILTER_NTUPLE,
441 RTE_ETH_FILTER_DELETE,
/* Add a 5-tuple filter on l1 matching dst IP == l2->ip AND proto == UDP,
 * steering hits to l1's local UDP queue (priority above the plain IP
 * filter). */
446 app_link_filter_udp_add(struct app_link_params *l1, struct app_link_params *l2)
448 struct rte_eth_ntuple_filter filter = {
449 .flags = RTE_5TUPLE_FLAGS,
450 .dst_ip = rte_bswap32(l2->ip),
451 .dst_ip_mask = UINT32_MAX, /* Enable */
453 .src_ip_mask = 0, /* Disable */
455 .dst_port_mask = 0, /* Disable */
457 .src_port_mask = 0, /* Disable */
458 .proto = IPPROTO_UDP,
459 .proto_mask = UINT8_MAX, /* Enable */
461 .priority = 2, /* Higher priority than IP */
462 .queue = l1->udp_local_q,
465 return rte_eth_dev_filter_ctrl(l1->pmd_id,
466 RTE_ETH_FILTER_NTUPLE,
/* Delete the UDP 5-tuple filter previously installed by
 * app_link_filter_udp_add(); the filter spec must match the add exactly. */
472 app_link_filter_udp_del(struct app_link_params *l1, struct app_link_params *l2)
474 struct rte_eth_ntuple_filter filter = {
475 .flags = RTE_5TUPLE_FLAGS,
476 .dst_ip = rte_bswap32(l2->ip),
477 .dst_ip_mask = UINT32_MAX, /* Enable */
479 .src_ip_mask = 0, /* Disable */
481 .dst_port_mask = 0, /* Disable */
483 .src_port_mask = 0, /* Disable */
484 .proto = IPPROTO_UDP,
485 .proto_mask = UINT8_MAX, /* Enable */
487 .priority = 2, /* Higher priority than IP */
488 .queue = l1->udp_local_q,
491 return rte_eth_dev_filter_ctrl(l1->pmd_id,
492 RTE_ETH_FILTER_NTUPLE,
493 RTE_ETH_FILTER_DELETE,
/* Add a 5-tuple filter on l1 matching dst IP == l2->ip AND proto == SCTP,
 * steering hits to l1's local SCTP queue (priority above the plain IP
 * filter). */
498 app_link_filter_sctp_add(struct app_link_params *l1, struct app_link_params *l2)
500 struct rte_eth_ntuple_filter filter = {
501 .flags = RTE_5TUPLE_FLAGS,
502 .dst_ip = rte_bswap32(l2->ip),
503 .dst_ip_mask = UINT32_MAX, /* Enable */
505 .src_ip_mask = 0, /* Disable */
507 .dst_port_mask = 0, /* Disable */
509 .src_port_mask = 0, /* Disable */
510 .proto = IPPROTO_SCTP,
511 .proto_mask = UINT8_MAX, /* Enable */
513 .priority = 2, /* Higher priority than IP */
514 .queue = l1->sctp_local_q,
517 return rte_eth_dev_filter_ctrl(l1->pmd_id,
518 RTE_ETH_FILTER_NTUPLE,
/* Delete the SCTP 5-tuple filter previously installed by
 * app_link_filter_sctp_add(); the filter spec must match the add exactly. */
524 app_link_filter_sctp_del(struct app_link_params *l1, struct app_link_params *l2)
526 struct rte_eth_ntuple_filter filter = {
527 .flags = RTE_5TUPLE_FLAGS,
528 .dst_ip = rte_bswap32(l2->ip),
529 .dst_ip_mask = UINT32_MAX, /* Enable */
531 .src_ip_mask = 0, /* Disable */
533 .dst_port_mask = 0, /* Disable */
535 .src_port_mask = 0, /* Disable */
536 .proto = IPPROTO_SCTP,
537 .proto_mask = UINT8_MAX, /* Enable */
539 .priority = 2, /* Higher priority than IP */
540 .queue = l1->sctp_local_q,
543 return rte_eth_dev_filter_ctrl(l1->pmd_id,
544 RTE_ETH_FILTER_NTUPLE,
545 RTE_ETH_FILTER_DELETE,
/* If the link has a dedicated ARP queue configured, install the ARP filter
 * and log the action; panic when the device rejects the filter. */
550 app_link_set_arp_filter(struct app_params *app, struct app_link_params *cp)
552 if (cp->arp_q != 0) {
553 int status = app_link_filter_arp_add(cp);
555 APP_LOG(app, LOW, "%s (%" PRIu32 "): "
556 "Adding ARP filter (queue = %" PRIu32 ")",
557 cp->name, cp->pmd_id, cp->arp_q);
560 rte_panic("%s (%" PRIu32 "): "
561 "Error adding ARP filter "
562 "(queue = %" PRIu32 ") (%" PRId32 ")\n",
563 cp->name, cp->pmd_id, cp->arp_q, status);
/* If the link has a dedicated TCP SYN queue configured, install the SYN
 * filter and log the action; panic when the device rejects the filter. */
568 app_link_set_tcp_syn_filter(struct app_params *app, struct app_link_params *cp)
570 if (cp->tcp_syn_q != 0) {
571 int status = app_link_filter_tcp_syn_add(cp);
573 APP_LOG(app, LOW, "%s (%" PRIu32 "): "
574 "Adding TCP SYN filter (queue = %" PRIu32 ")",
575 cp->name, cp->pmd_id, cp->tcp_syn_q);
578 rte_panic("%s (%" PRIu32 "): "
579 "Error adding TCP SYN filter "
580 "(queue = %" PRIu32 ") (%" PRId32 ")\n",
581 cp->name, cp->pmd_id, cp->tcp_syn_q,
/* Bring link cp up: on every link that has a local queue for a given
 * protocol, install the IP/TCP/UDP/SCTP filters steering traffic destined
 * to cp's IP, then request PMD link-up and mark the link up. Panics on any
 * filter or link-up error (except -ENOTSUP from the PMD).
 * NOTE(review): not every original line is visible in this view. */
587 app_link_up_internal(struct app_params *app, struct app_link_params *cp)
592 /* For each link, add filters for IP of current link */
594 for (i = 0; i < app->n_links; i++) {
595 struct app_link_params *p = &app->link_params[i];
598 if (p->ip_local_q != 0) {
599 int status = app_link_filter_ip_add(p, cp);
601 APP_LOG(app, LOW, "%s (%" PRIu32 "): "
602 "Adding IP filter (queue= %" PRIu32
603 ", IP = 0x%08" PRIx32 ")",
604 p->name, p->pmd_id, p->ip_local_q,
608 rte_panic("%s (%" PRIu32 "): "
610 "filter (queue= %" PRIu32 ", "
614 p->ip_local_q, cp->ip, status);
618 if (p->tcp_local_q != 0) {
619 int status = app_link_filter_tcp_add(p, cp);
621 APP_LOG(app, LOW, "%s (%" PRIu32 "): "
624 ", IP = 0x%08" PRIx32 ")",
625 p->name, p->pmd_id, p->tcp_local_q,
629 rte_panic("%s (%" PRIu32 "): "
631 "filter (queue = %" PRIu32 ", "
635 p->tcp_local_q, cp->ip, status);
639 if (p->udp_local_q != 0) {
640 int status = app_link_filter_udp_add(p, cp);
642 APP_LOG(app, LOW, "%s (%" PRIu32 "): "
645 ", IP = 0x%08" PRIx32 ")",
646 p->name, p->pmd_id, p->udp_local_q,
650 rte_panic("%s (%" PRIu32 "): "
652 "filter (queue = %" PRIu32 ", "
656 p->udp_local_q, cp->ip, status);
660 if (p->sctp_local_q != 0) {
661 int status = app_link_filter_sctp_add(p, cp);
663 APP_LOG(app, LOW, "%s (%" PRIu32
664 "): Adding SCTP filter "
666 ", IP = 0x%08" PRIx32 ")",
667 p->name, p->pmd_id, p->sctp_local_q,
671 rte_panic("%s (%" PRIu32 "): "
673 "filter (queue = %" PRIu32 ", "
677 p->sctp_local_q, cp->ip,
684 status = rte_eth_dev_set_link_up(cp->pmd_id);
685 /* Do not panic if PMD does not provide link up functionality */
686 if (status < 0 && status != -ENOTSUP)
687 rte_panic("%s (%" PRIu32 "): PMD set link up error %"
688 PRId32 "\n", cp->name, cp->pmd_id, status);
690 /* Mark link as UP */
/* Bring link cp down: request PMD link-down (tolerating -ENOTSUP), mark the
 * link down, and — when cp has a valid IP — remove from every link the
 * IP/TCP/UDP/SCTP filters that steered traffic destined to cp's IP. Mirrors
 * app_link_up_internal(). Panics on any filter-delete error.
 * NOTE(review): not every original line is visible in this view. */
695 app_link_down_internal(struct app_params *app, struct app_link_params *cp)
701 status = rte_eth_dev_set_link_down(cp->pmd_id);
702 /* Do not panic if PMD does not provide link down functionality */
703 if (status < 0 && status != -ENOTSUP)
704 rte_panic("%s (%" PRIu32 "): PMD set link down error %"
705 PRId32 "\n", cp->name, cp->pmd_id, status);
707 /* Mark link as DOWN */
710 /* Return if current link IP is not valid */
714 /* For each link, remove filters for IP of current link */
715 for (i = 0; i < app->n_links; i++) {
716 struct app_link_params *p = &app->link_params[i];
719 if (p->ip_local_q != 0) {
720 int status = app_link_filter_ip_del(p, cp);
722 APP_LOG(app, LOW, "%s (%" PRIu32
723 "): Deleting IP filter "
724 "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")",
725 p->name, p->pmd_id, p->ip_local_q, cp->ip);
728 rte_panic("%s (%" PRIu32
729 "): Error deleting IP filter "
733 p->name, p->pmd_id, p->ip_local_q,
738 if (p->tcp_local_q != 0) {
739 int status = app_link_filter_tcp_del(p, cp);
741 APP_LOG(app, LOW, "%s (%" PRIu32
742 "): Deleting TCP filter "
744 ", IP = 0x%" PRIx32 ")",
745 p->name, p->pmd_id, p->tcp_local_q, cp->ip);
748 rte_panic("%s (%" PRIu32
749 "): Error deleting TCP filter "
753 p->name, p->pmd_id, p->tcp_local_q,
758 if (p->udp_local_q != 0) {
759 int status = app_link_filter_udp_del(p, cp);
761 APP_LOG(app, LOW, "%s (%" PRIu32
762 "): Deleting UDP filter "
763 "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")",
764 p->name, p->pmd_id, p->udp_local_q, cp->ip);
767 rte_panic("%s (%" PRIu32
768 "): Error deleting UDP filter "
772 p->name, p->pmd_id, p->udp_local_q,
777 if (p->sctp_local_q != 0) {
778 int status = app_link_filter_sctp_del(p, cp);
780 APP_LOG(app, LOW, "%s (%" PRIu32
781 "): Deleting SCTP filter "
783 ", IP = 0x%" PRIx32 ")",
784 p->name, p->pmd_id, p->sctp_local_q, cp->ip);
787 rte_panic("%s (%" PRIu32
788 "): Error deleting SCTP filter "
792 p->name, p->pmd_id, p->sctp_local_q,
/* Query the link status of every configured port, log speed (Gbps) and
 * UP/DOWN state, and panic if any link is down. */
799 app_check_link(struct app_params *app)
801 uint32_t all_links_up, i;
805 for (i = 0; i < app->n_links; i++) {
806 struct app_link_params *p = &app->link_params[i];
807 struct rte_eth_link link_params;
809 memset(&link_params, 0, sizeof(link_params));
810 rte_eth_link_get(p->pmd_id, &link_params);
812 APP_LOG(app, HIGH, "%s (%" PRIu32 ") (%" PRIu32 " Gbps) %s",
815 link_params.link_speed / 1000,
816 link_params.link_status ? "UP" : "DOWN");
818 if (link_params.link_status == ETH_LINK_DOWN)
822 if (all_links_up == 0)
823 rte_panic("Some links are DOWN\n");
/* Return whether any software queue enables IPv4/IPv6 fragmentation or
 * reassembly (RAS). */
827 is_any_swq_frag_or_ras(struct app_params *app)
831 for (i = 0; i < app->n_pktq_swq; i++) {
832 struct app_pktq_swq_params *p = &app->swq_params[i];
834 if ((p->ipv4_frag == 1) || (p->ipv6_frag == 1) ||
835 (p->ipv4_ras == 1) || (p->ipv6_ras == 1))
/* When any SWQ performs fragmentation or reassembly, enable the
 * multi-segment TX offload on every link (fragmented mbufs are chained). */
843 app_init_link_frag_ras(struct app_params *app)
847 if (is_any_swq_frag_or_ras(app)) {
848 for (i = 0; i < app->n_links; i++) {
849 struct app_link_params *p_link = &app->link_params[i];
850 p_link->conf.txmode.offloads |=
851 DEV_TX_OFFLOAD_MULTI_SEGS;
/* Return the NUMA socket of the given port, falling back to socket 0 when
 * the PMD reports SOCKET_ID_ANY. */
857 app_get_cpu_socket_id(uint32_t pmd_id)
859 int status = rte_eth_dev_socket_id(pmd_id);
861 return (status != SOCKET_ID_ANY) ? status : 0;
/* RSS is considered enabled for a link iff it has at least one RSS queue. */
865 app_link_rss_enabled(struct app_link_params *cp)
867 return (cp->n_rss_qs) ? 1 : 0;
/* Program the device RSS redirection table (RETA): spread all RETA entries
 * round-robin over the link's configured RSS queues. Panics when the device
 * reports a zero or oversized RETA, or when the RETA update fails. */
871 app_link_rss_setup(struct app_link_params *cp)
873 struct rte_eth_dev_info dev_info;
874 struct rte_eth_rss_reta_entry64 reta_conf[APP_RETA_SIZE_MAX];
879 memset(&dev_info, 0, sizeof(dev_info));
880 rte_eth_dev_info_get(cp->pmd_id, &dev_info);
882 if (dev_info.reta_size == 0)
883 rte_panic("%s (%u): RSS setup error (null RETA size)\n",
884 cp->name, cp->pmd_id);
886 if (dev_info.reta_size > ETH_RSS_RETA_SIZE_512)
887 rte_panic("%s (%u): RSS setup error (RETA size too big)\n",
888 cp->name, cp->pmd_id);
890 /* Setup RETA contents */
891 memset(reta_conf, 0, sizeof(reta_conf));
/* Mark every entry of each 64-entry RETA group as valid for update */
893 for (i = 0; i < dev_info.reta_size; i++)
894 reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
896 for (i = 0; i < dev_info.reta_size; i++) {
897 uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
898 uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
899 uint32_t rss_qs_pos = i % cp->n_rss_qs;
901 reta_conf[reta_id].reta[reta_pos] =
902 (uint16_t) cp->rss_qs[rss_qs_pos];
906 status = rte_eth_dev_rss_reta_update(cp->pmd_id,
910 rte_panic("%s (%u): RSS setup error (RETA update failed)\n",
911 cp->name, cp->pmd_id);
/* Put the link's RX path into RSS multi-queue mode and program its RSS hash
 * function mask from the configured per-protocol settings. */
915 app_init_link_set_config(struct app_link_params *p)
918 p->conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
919 p->conf.rx_adv_conf.rss_conf.rss_hf = p->rss_proto_ipv4 |
/* Bring up every configured link: configure the ethdev (with fast-free TX
 * offload when supported), set up its RX/TX queues, start the port, install
 * ARP/TCP-SYN filters and optional RSS, then bring the link up. Finally
 * verify that all links are up.
 * NOTE(review): not every original line is visible in this view. */
926 app_init_link(struct app_params *app)
930 app_init_link_frag_ras(app);
932 for (i = 0; i < app->n_links; i++) {
933 struct app_link_params *p_link = &app->link_params[i];
934 struct rte_eth_dev_info dev_info;
935 uint32_t link_id, n_hwq_in, n_hwq_out, j;
/* Numeric link id is encoded in the link name ("LINK<n>") */
938 sscanf(p_link->name, "LINK%" PRIu32, &link_id);
939 n_hwq_in = app_link_get_n_rxq(app, p_link);
940 n_hwq_out = app_link_get_n_txq(app, p_link);
941 app_init_link_set_config(p_link);
943 APP_LOG(app, HIGH, "Initializing %s (%" PRIu32") "
944 "(%" PRIu32 " RXQ, %" PRIu32 " TXQ) ...",
/* Enable mbuf fast-free TX offload when the device supports it */
951 rte_eth_dev_info_get(p_link->pmd_id, &dev_info);
952 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
953 p_link->conf.txmode.offloads |=
954 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
955 status = rte_eth_dev_configure(
961 rte_panic("%s (%" PRId32 "): "
962 "init error (%" PRId32 ")\n",
963 p_link->name, p_link->pmd_id, status);
965 rte_eth_macaddr_get(p_link->pmd_id,
966 (struct ether_addr *) &p_link->mac_addr);
969 rte_eth_promiscuous_enable(p_link->pmd_id);
/* RX queues: only RXQs whose name ("RXQ<link>.<queue>") matches this link */
972 for (j = 0; j < app->n_pktq_hwq_in; j++) {
973 struct app_pktq_hwq_in_params *p_rxq =
974 &app->hwq_in_params[j];
975 uint32_t rxq_link_id, rxq_queue_id;
976 uint16_t nb_rxd = p_rxq->size;
978 sscanf(p_rxq->name, "RXQ%" PRIu32 ".%" PRIu32,
979 &rxq_link_id, &rxq_queue_id);
980 if (rxq_link_id != link_id)
983 status = rte_eth_dev_adjust_nb_rx_tx_desc(
988 rte_panic("%s (%" PRIu32 "): "
989 "%s adjust number of Rx descriptors "
990 "error (%" PRId32 ")\n",
996 p_rxq->conf.offloads = p_link->conf.rxmode.offloads;
997 status = rte_eth_rx_queue_setup(
1001 app_get_cpu_socket_id(p_link->pmd_id),
1003 app->mempool[p_rxq->mempool_id]);
1005 rte_panic("%s (%" PRIu32 "): "
1006 "%s init error (%" PRId32 ")\n",
/* TX queues: same name-matching scheme ("TXQ<link>.<queue>") */
1014 for (j = 0; j < app->n_pktq_hwq_out; j++) {
1015 struct app_pktq_hwq_out_params *p_txq =
1016 &app->hwq_out_params[j];
1017 uint32_t txq_link_id, txq_queue_id;
1018 uint16_t nb_txd = p_txq->size;
1020 sscanf(p_txq->name, "TXQ%" PRIu32 ".%" PRIu32,
1021 &txq_link_id, &txq_queue_id);
1022 if (txq_link_id != link_id)
1025 status = rte_eth_dev_adjust_nb_rx_tx_desc(
1030 rte_panic("%s (%" PRIu32 "): "
1031 "%s adjust number of Tx descriptors "
1032 "error (%" PRId32 ")\n",
1038 p_txq->conf.offloads = p_link->conf.txmode.offloads;
1039 status = rte_eth_tx_queue_setup(
1043 app_get_cpu_socket_id(p_link->pmd_id),
1046 rte_panic("%s (%" PRIu32 "): "
1047 "%s init error (%" PRId32 ")\n",
1055 status = rte_eth_dev_start(p_link->pmd_id);
1057 rte_panic("Cannot start %s (error %" PRId32 ")\n",
1058 p_link->name, status);
1061 app_link_set_arp_filter(app, p_link);
1062 app_link_set_tcp_syn_filter(app, p_link);
1063 if (app_link_rss_enabled(p_link))
1064 app_link_rss_setup(p_link);
1067 app_link_up_internal(app, p_link);
/* Fail hard if any link did not come up */
1070 app_check_link(app);
/* Create one rte_ring per software queue; single-consumer/single-producer
 * flags are set when exactly one reader/writer pipeline is attached. Panics
 * when a ring cannot be created. */
1074 app_init_swq(struct app_params *app)
1078 for (i = 0; i < app->n_pktq_swq; i++) {
1079 struct app_pktq_swq_params *p = &app->swq_params[i];
1082 if (app_swq_get_readers(app, p) == 1)
1083 flags |= RING_F_SC_DEQ;
1084 if (app_swq_get_writers(app, p) == 1)
1085 flags |= RING_F_SP_ENQ;
1087 APP_LOG(app, HIGH, "Initializing %s...", p->name);
1088 app->swq[i] = rte_ring_create(
1094 if (app->swq[i] == NULL)
1095 rte_panic("%s init error\n", p->name);
/* Configure each traffic-manager (TM) queue: create an rte_sched port whose
 * rate is derived from the underlying link speed, then configure every
 * subport and each pipe that has a valid profile. Panics on any error.
 * NOTE(review): not every original line is visible in this view. */
1100 app_init_tm(struct app_params *app)
1104 for (i = 0; i < app->n_pktq_tm; i++) {
1105 struct app_pktq_tm_params *p_tm = &app->tm_params[i];
1106 struct app_link_params *p_link;
1107 struct rte_eth_link link_eth_params;
1108 struct rte_sched_port *sched;
1109 uint32_t n_subports, subport_id;
1112 p_link = app_get_link_for_tm(app, p_tm);
1114 rte_eth_link_get(p_link->pmd_id, &link_eth_params);
/* Port rate in bytes/s, from the link speed reported in Mbps */
1117 p_tm->sched_port_params.name = p_tm->name;
1118 p_tm->sched_port_params.socket =
1119 app_get_cpu_socket_id(p_link->pmd_id);
1120 p_tm->sched_port_params.rate =
1121 (uint64_t) link_eth_params.link_speed * 1000 * 1000 / 8;
1123 APP_LOG(app, HIGH, "Initializing %s ...", p_tm->name);
1124 sched = rte_sched_port_config(&p_tm->sched_port_params);
1126 rte_panic("%s init error\n", p_tm->name);
1130 n_subports = p_tm->sched_port_params.n_subports_per_port;
1131 for (subport_id = 0; subport_id < n_subports; subport_id++) {
1132 uint32_t n_pipes_per_subport, pipe_id;
1134 status = rte_sched_subport_config(sched,
1136 &p_tm->sched_subport_params[subport_id]);
1138 rte_panic("%s subport %" PRIu32
1139 " init error (%" PRId32 ")\n",
1140 p_tm->name, subport_id, status);
1143 n_pipes_per_subport =
1144 p_tm->sched_port_params.n_pipes_per_subport;
1146 pipe_id < n_pipes_per_subport;
1148 int profile_id = p_tm->sched_pipe_to_profile[
1149 subport_id * APP_MAX_SCHED_PIPES +
/* profile_id == -1 marks an unconfigured pipe: skip it */
1152 if (profile_id == -1)
1155 status = rte_sched_pipe_config(sched,
1160 rte_panic("%s subport %" PRIu32
1162 " (profile %" PRId32 ") "
1163 "init error (% " PRId32 ")\n",
1164 p_tm->name, subport_id, pipe_id,
1165 profile_id, status);
1171 #ifndef RTE_EXEC_ENV_LINUXAPP
/* Non-Linux stub: TAP devices require the Linux execution environment, so
 * panic when any TAP queue is configured. */
1173 app_init_tap(struct app_params *app) {
1174 if (app->n_pktq_tap == 0)
1177 rte_panic("TAP device not supported.\n");
/* Linux implementation: open /dev/net/tun non-blocking for each configured
 * TAP queue and create the named TAP interface via TUNSETIFF. Panics when
 * the device cannot be opened or configured. */
1181 app_init_tap(struct app_params *app)
1185 for (i = 0; i < app->n_pktq_tap; i++) {
1186 struct app_pktq_tap_params *p_tap = &app->tap_params[i];
1190 APP_LOG(app, HIGH, "Initializing %s ...", p_tap->name);
1192 fd = open("/dev/net/tun", O_RDWR | O_NONBLOCK);
1194 rte_panic("Cannot open file /dev/net/tun\n");
1196 memset(&ifr, 0, sizeof(ifr));
1197 ifr.ifr_flags = IFF_TAP | IFF_NO_PI; /* No packet information */
1198 snprintf(ifr.ifr_name, IFNAMSIZ, "%s", p_tap->name);
1200 status = ioctl(fd, TUNSETIFF, (void *) &ifr);
1202 rte_panic("TAP setup error\n");
1209 #ifdef RTE_LIBRTE_KNI
/* KNI callback: bring the corresponding ethdev port up or down when the
 * kernel toggles the KNI interface state. Rejects out-of-range port ids. */
1211 kni_config_network_interface(uint16_t port_id, uint8_t if_up) {
1214 if (port_id >= rte_eth_dev_count())
1218 rte_eth_dev_set_link_up(port_id) :
1219 rte_eth_dev_set_link_down(port_id);
/* KNI callback: propagate an MTU change from the kernel interface to the
 * ethdev port; rejects out-of-range port ids and MTUs above ETHER_MAX_LEN. */
1225 kni_change_mtu(uint16_t port_id, unsigned int new_mtu) {
1228 if (port_id >= rte_eth_dev_count())
1231 if (new_mtu > ETHER_MAX_LEN)
1235 ret = rte_eth_dev_set_mtu(port_id, new_mtu);
1241 #endif /* RTE_LIBRTE_KNI */
1243 #ifndef RTE_LIBRTE_KNI
/* Stub used when librte_kni is not compiled in: panic if any KNI queue is
 * configured. */
1245 app_init_kni(struct app_params *app) {
1246 if (app->n_pktq_kni == 0)
1249 rte_panic("Can not init KNI without librte_kni support.\n");
/* Allocate one KNI interface per configured KNI queue: derive the KNI config
 * from the bound link's PCI device info, optionally pin the KNI kernel
 * thread to the mapped lcore (force_bind), and register the MTU and
 * link-state callbacks. Panics on invalid lcore or allocation failure.
 * NOTE(review): not every original line is visible in this view. */
1253 app_init_kni(struct app_params *app) {
1256 if (app->n_pktq_kni == 0)
1259 rte_kni_init(app->n_pktq_kni);
1261 for (i = 0; i < app->n_pktq_kni; i++) {
1262 struct app_pktq_kni_params *p_kni = &app->kni_params[i];
1263 struct app_link_params *p_link;
1264 struct rte_eth_dev_info dev_info;
1265 struct app_mempool_params *mempool_params;
1266 struct rte_mempool *mempool;
1267 struct rte_kni_conf conf;
1268 struct rte_kni_ops ops;
/* Resolve the link and mempool this KNI queue is attached to */
1271 p_link = app_get_link_for_kni(app, p_kni);
1272 memset(&dev_info, 0, sizeof(dev_info));
1273 rte_eth_dev_info_get(p_link->pmd_id, &dev_info);
1276 mempool_params = &app->mempool_params[p_kni->mempool_id];
1277 mempool = app->mempool[p_kni->mempool_id];
1280 memset(&conf, 0, sizeof(conf));
1281 snprintf(conf.name, RTE_KNI_NAMESIZE, "%s", p_kni->name);
1282 conf.force_bind = p_kni->force_bind;
1283 if (conf.force_bind) {
1286 lcore_id = cpu_core_map_get_lcore_id(app->core_map,
1289 p_kni->hyper_th_id);
1292 rte_panic("%s invalid CPU core\n", p_kni->name);
1294 conf.core_id = (uint32_t) lcore_id;
1296 conf.group_id = p_link->pmd_id;
1297 conf.mbuf_size = mempool_params->buffer_size;
1298 conf.addr = dev_info.pci_dev->addr;
1299 conf.id = dev_info.pci_dev->id;
/* Register callbacks invoked by the kernel side of KNI */
1301 memset(&ops, 0, sizeof(ops));
1302 ops.port_id = (uint8_t) p_link->pmd_id;
1303 ops.change_mtu = kni_change_mtu;
1304 ops.config_network_if = kni_config_network_interface;
1306 APP_LOG(app, HIGH, "Initializing %s ...", p_kni->name);
1307 app->kni[i] = rte_kni_alloc(mempool, &conf, &ops);
1309 rte_panic("%s init error\n", p_kni->name);
1312 #endif /* RTE_LIBRTE_KNI */
/* Create one single-producer/single-consumer rte_ring per message queue;
 * panic when a ring cannot be created. */
1315 app_init_msgq(struct app_params *app)
1319 for (i = 0; i < app->n_msgq; i++) {
1320 struct app_msgq_params *p = &app->msgq_params[i];
1322 APP_LOG(app, HIGH, "Initializing %s ...", p->name);
1323 app->msgq[i] = rte_ring_create(
1327 RING_F_SP_ENQ | RING_F_SC_DEQ);
1329 if (app->msgq[i] == NULL)
1330 rte_panic("%s init error\n", p->name);
1334 void app_pipeline_params_get(struct app_params *app,
1335 struct app_pipeline_params *p_in,
1336 struct pipeline_params *p_out)
1340 snprintf(p_out->name, PIPELINE_NAME_SIZE, "%s", p_in->name);
1342 snprintf(p_out->type, PIPELINE_TYPE_SIZE, "%s", p_in->type);
1344 p_out->socket_id = (int) p_in->socket_id;
1346 p_out->log_level = app->log_level;
1349 p_out->n_ports_in = p_in->n_pktq_in;
1350 for (i = 0; i < p_in->n_pktq_in; i++) {
1351 struct app_pktq_in_params *in = &p_in->pktq_in[i];
1352 struct pipeline_port_in_params *out = &p_out->port_in[i];
1355 case APP_PKTQ_IN_HWQ:
1357 struct app_pktq_hwq_in_params *p_hwq_in =
1358 &app->hwq_in_params[in->id];
1359 struct app_link_params *p_link =
1360 app_get_link_for_rxq(app, p_hwq_in);
1361 uint32_t rxq_link_id, rxq_queue_id;
1363 sscanf(p_hwq_in->name, "RXQ%" SCNu32 ".%" SCNu32,
1367 out->type = PIPELINE_PORT_IN_ETHDEV_READER;
1368 out->params.ethdev.port_id = p_link->pmd_id;
1369 out->params.ethdev.queue_id = rxq_queue_id;
1370 out->burst_size = p_hwq_in->burst;
1373 case APP_PKTQ_IN_SWQ:
1375 struct app_pktq_swq_params *swq_params = &app->swq_params[in->id];
1377 if ((swq_params->ipv4_frag == 0) && (swq_params->ipv6_frag == 0)) {
1378 if (app_swq_get_readers(app, swq_params) == 1) {
1379 out->type = PIPELINE_PORT_IN_RING_READER;
1380 out->params.ring.ring = app->swq[in->id];
1381 out->burst_size = app->swq_params[in->id].burst_read;
1383 out->type = PIPELINE_PORT_IN_RING_MULTI_READER;
1384 out->params.ring_multi.ring = app->swq[in->id];
1385 out->burst_size = swq_params->burst_read;
1388 if (swq_params->ipv4_frag == 1) {
1389 struct rte_port_ring_reader_ipv4_frag_params *params =
1390 &out->params.ring_ipv4_frag;
1392 out->type = PIPELINE_PORT_IN_RING_READER_IPV4_FRAG;
1393 params->ring = app->swq[in->id];
1394 params->mtu = swq_params->mtu;
1395 params->metadata_size = swq_params->metadata_size;
1396 params->pool_direct =
1397 app->mempool[swq_params->mempool_direct_id];
1398 params->pool_indirect =
1399 app->mempool[swq_params->mempool_indirect_id];
1400 out->burst_size = swq_params->burst_read;
1402 struct rte_port_ring_reader_ipv6_frag_params *params =
1403 &out->params.ring_ipv6_frag;
1405 out->type = PIPELINE_PORT_IN_RING_READER_IPV6_FRAG;
1406 params->ring = app->swq[in->id];
1407 params->mtu = swq_params->mtu;
1408 params->metadata_size = swq_params->metadata_size;
1409 params->pool_direct =
1410 app->mempool[swq_params->mempool_direct_id];
1411 params->pool_indirect =
1412 app->mempool[swq_params->mempool_indirect_id];
1413 out->burst_size = swq_params->burst_read;
1418 case APP_PKTQ_IN_TM:
1420 out->type = PIPELINE_PORT_IN_SCHED_READER;
1421 out->params.sched.sched = app->tm[in->id];
1422 out->burst_size = app->tm_params[in->id].burst_read;
1425 #ifdef RTE_EXEC_ENV_LINUXAPP
1426 case APP_PKTQ_IN_TAP:
1428 struct app_pktq_tap_params *tap_params =
1429 &app->tap_params[in->id];
1430 struct app_mempool_params *mempool_params =
1431 &app->mempool_params[tap_params->mempool_id];
1432 struct rte_mempool *mempool =
1433 app->mempool[tap_params->mempool_id];
1435 out->type = PIPELINE_PORT_IN_FD_READER;
1436 out->params.fd.fd = app->tap[in->id];
1437 out->params.fd.mtu = mempool_params->buffer_size;
1438 out->params.fd.mempool = mempool;
1439 out->burst_size = app->tap_params[in->id].burst_read;
1443 #ifdef RTE_LIBRTE_KNI
1444 case APP_PKTQ_IN_KNI:
1446 out->type = PIPELINE_PORT_IN_KNI_READER;
1447 out->params.kni.kni = app->kni[in->id];
1448 out->burst_size = app->kni_params[in->id].burst_read;
1451 #endif /* RTE_LIBRTE_KNI */
1452 case APP_PKTQ_IN_SOURCE:
1454 uint32_t mempool_id =
1455 app->source_params[in->id].mempool_id;
1457 out->type = PIPELINE_PORT_IN_SOURCE;
1458 out->params.source.mempool = app->mempool[mempool_id];
1459 out->burst_size = app->source_params[in->id].burst;
1460 out->params.source.file_name =
1461 app->source_params[in->id].file_name;
1462 out->params.source.n_bytes_per_pkt =
1463 app->source_params[in->id].n_bytes_per_pkt;
1472 p_out->n_ports_out = p_in->n_pktq_out;
1473 for (i = 0; i < p_in->n_pktq_out; i++) {
1474 struct app_pktq_out_params *in = &p_in->pktq_out[i];
1475 struct pipeline_port_out_params *out = &p_out->port_out[i];
1478 case APP_PKTQ_OUT_HWQ:
1480 struct app_pktq_hwq_out_params *p_hwq_out =
1481 &app->hwq_out_params[in->id];
1482 struct app_link_params *p_link =
1483 app_get_link_for_txq(app, p_hwq_out);
1484 uint32_t txq_link_id, txq_queue_id;
1486 sscanf(p_hwq_out->name,
1487 "TXQ%" SCNu32 ".%" SCNu32,
1491 if (p_hwq_out->dropless == 0) {
1492 struct rte_port_ethdev_writer_params *params =
1493 &out->params.ethdev;
1495 out->type = PIPELINE_PORT_OUT_ETHDEV_WRITER;
1496 params->port_id = p_link->pmd_id;
1497 params->queue_id = txq_queue_id;
1498 params->tx_burst_sz =
1499 app->hwq_out_params[in->id].burst;
1501 struct rte_port_ethdev_writer_nodrop_params
1502 *params = &out->params.ethdev_nodrop;
1505 PIPELINE_PORT_OUT_ETHDEV_WRITER_NODROP;
1506 params->port_id = p_link->pmd_id;
1507 params->queue_id = txq_queue_id;
1508 params->tx_burst_sz = p_hwq_out->burst;
1509 params->n_retries = p_hwq_out->n_retries;
1513 case APP_PKTQ_OUT_SWQ:
1515 struct app_pktq_swq_params *swq_params = &app->swq_params[in->id];
1517 if ((swq_params->ipv4_ras == 0) && (swq_params->ipv6_ras == 0)) {
1518 if (app_swq_get_writers(app, swq_params) == 1) {
1519 if (app->swq_params[in->id].dropless == 0) {
1520 struct rte_port_ring_writer_params *params =
1523 out->type = PIPELINE_PORT_OUT_RING_WRITER;
1524 params->ring = app->swq[in->id];
1525 params->tx_burst_sz =
1526 app->swq_params[in->id].burst_write;
1528 struct rte_port_ring_writer_nodrop_params
1529 *params = &out->params.ring_nodrop;
1532 PIPELINE_PORT_OUT_RING_WRITER_NODROP;
1533 params->ring = app->swq[in->id];
1534 params->tx_burst_sz =
1535 app->swq_params[in->id].burst_write;
1537 app->swq_params[in->id].n_retries;
1540 if (swq_params->dropless == 0) {
1541 struct rte_port_ring_multi_writer_params *params =
1542 &out->params.ring_multi;
1544 out->type = PIPELINE_PORT_OUT_RING_MULTI_WRITER;
1545 params->ring = app->swq[in->id];
1546 params->tx_burst_sz = swq_params->burst_write;
1548 struct rte_port_ring_multi_writer_nodrop_params
1549 *params = &out->params.ring_multi_nodrop;
1551 out->type = PIPELINE_PORT_OUT_RING_MULTI_WRITER_NODROP;
1552 params->ring = app->swq[in->id];
1553 params->tx_burst_sz = swq_params->burst_write;
1554 params->n_retries = swq_params->n_retries;
1558 if (swq_params->ipv4_ras == 1) {
1559 struct rte_port_ring_writer_ipv4_ras_params *params =
1560 &out->params.ring_ipv4_ras;
1562 out->type = PIPELINE_PORT_OUT_RING_WRITER_IPV4_RAS;
1563 params->ring = app->swq[in->id];
1564 params->tx_burst_sz = swq_params->burst_write;
1566 struct rte_port_ring_writer_ipv6_ras_params *params =
1567 &out->params.ring_ipv6_ras;
1569 out->type = PIPELINE_PORT_OUT_RING_WRITER_IPV6_RAS;
1570 params->ring = app->swq[in->id];
1571 params->tx_burst_sz = swq_params->burst_write;
1576 case APP_PKTQ_OUT_TM:
1578 struct rte_port_sched_writer_params *params =
1581 out->type = PIPELINE_PORT_OUT_SCHED_WRITER;
1582 params->sched = app->tm[in->id];
1583 params->tx_burst_sz =
1584 app->tm_params[in->id].burst_write;
1587 #ifdef RTE_EXEC_ENV_LINUXAPP
1588 case APP_PKTQ_OUT_TAP:
1590 struct rte_port_fd_writer_params *params =
1593 out->type = PIPELINE_PORT_OUT_FD_WRITER;
1594 params->fd = app->tap[in->id];
1595 params->tx_burst_sz =
1596 app->tap_params[in->id].burst_write;
1600 #ifdef RTE_LIBRTE_KNI
1601 case APP_PKTQ_OUT_KNI:
1603 struct app_pktq_kni_params *p_kni =
1604 &app->kni_params[in->id];
1606 if (p_kni->dropless == 0) {
1607 struct rte_port_kni_writer_params *params =
1610 out->type = PIPELINE_PORT_OUT_KNI_WRITER;
1611 params->kni = app->kni[in->id];
1612 params->tx_burst_sz =
1613 app->kni_params[in->id].burst_write;
1615 struct rte_port_kni_writer_nodrop_params
1616 *params = &out->params.kni_nodrop;
1618 out->type = PIPELINE_PORT_OUT_KNI_WRITER_NODROP;
1619 params->kni = app->kni[in->id];
1620 params->tx_burst_sz =
1621 app->kni_params[in->id].burst_write;
1623 app->kni_params[in->id].n_retries;
1627 #endif /* RTE_LIBRTE_KNI */
1628 case APP_PKTQ_OUT_SINK:
1630 out->type = PIPELINE_PORT_OUT_SINK;
1631 out->params.sink.file_name =
1632 app->sink_params[in->id].file_name;
1633 out->params.sink.max_n_pkts =
1634 app->sink_params[in->id].
1645 p_out->n_msgq = p_in->n_msgq_in;
1647 for (i = 0; i < p_in->n_msgq_in; i++)
1648 p_out->msgq_in[i] = app->msgq[p_in->msgq_in[i]];
1650 for (i = 0; i < p_in->n_msgq_out; i++)
1651 p_out->msgq_out[i] = app->msgq[p_in->msgq_out[i]];
1654 p_out->n_args = p_in->n_args;
1655 for (i = 0; i < p_in->n_args; i++) {
1656 p_out->args_name[i] = p_in->args_name[i];
1657 p_out->args_value[i] = p_in->args_value[i];
/*
 * app_init_pipelines() - instantiate every pipeline declared in the
 * application configuration.
 *
 * For each entry of app->pipeline_params[]: resolve the registered
 * pipeline type by name, convert the app-level parameters into the
 * generic struct pipeline_params, then invoke the type's back-end
 * f_init and front-end f_init hooks, storing the returned handles in
 * app->pipeline_data[].  Any failure is fatal (rte_panic), which is
 * this application's init-time error policy.
 *
 * NOTE(review): this is an elided extract -- several original source
 * lines are missing between the visible statements below.
 */
1662 app_init_pipelines(struct app_params *app)
1666 	for (p_id = 0; p_id < app->n_pipelines; p_id++) {
1667 		struct app_pipeline_params *params =
1668 			&app->pipeline_params[p_id];
1669 		struct app_pipeline_data *data = &app->pipeline_data[p_id];
1670 		struct pipeline_type *ptype;
1671 		struct pipeline_params pp;
1673 		APP_LOG(app, HIGH, "Initializing %s ...", params->name);
1675 		ptype = app_pipeline_type_find(app, params->type);
1677 			rte_panic("Init error: Unknown pipeline type \"%s\"\n",
1680 		app_pipeline_params_get(app, params, &pp);
		/* Back-end init is optional per type; NULL result is fatal. */
1684 		if (ptype->be_ops->f_init) {
1685 			data->be = ptype->be_ops->f_init(&pp, (void *) app);
1687 			if (data->be == NULL)
1688 				rte_panic("Pipeline instance \"%s\" back-end "
1689 					"init error\n", params->name);
		/* Front-end init is likewise optional; NULL result is fatal. */
1694 		if (ptype->fe_ops->f_init) {
1695 			data->fe = ptype->fe_ops->f_init(&pp, (void *) app);
1697 			if (data->fe == NULL)
1698 				rte_panic("Pipeline instance \"%s\" front-end "
1699 					"init error\n", params->name);
1702 		data->ptype = ptype;
		/* Convert the per-pipeline timer period from ms to TSC cycles. */
1704 		data->timer_period = (rte_get_tsc_hz() *
1705 			params->timer_period) / 1000;
/*
 * app_post_init_pipelines() - run the optional front-end post-init
 * hook of every pipeline instance, once all pipelines have been
 * created (so post-init may safely reference sibling pipelines).
 * A non-zero status from f_post_init is fatal (rte_panic).
 *
 * NOTE(review): elided extract -- some original lines are missing.
 */
1710 app_post_init_pipelines(struct app_params *app)
1714 	for (p_id = 0; p_id < app->n_pipelines; p_id++) {
1715 		struct app_pipeline_params *params =
1716 			&app->pipeline_params[p_id];
1717 		struct app_pipeline_data *data = &app->pipeline_data[p_id];
		/* Hook is optional: skip pipeline types without post-init. */
1720 		if (data->ptype->fe_ops->f_post_init == NULL)
1723 		status = data->ptype->fe_ops->f_post_init(data->fe);
1725 			rte_panic("Pipeline instance \"%s\" front-end "
1726 				"post-init error\n", params->name);
/*
 * app_init_threads() - bind every pipeline instance to the worker
 * thread (lcore) selected by its socket/core/hyper-thread triplet.
 *
 * For each pipeline: map (socket_id, core_id, hyper_th_id) to an
 * lcore via the CPU core map, initialize that thread's bookkeeping
 * (timer deadlines, headroom counters, control message queues), and
 * append the pipeline to the thread's "regular" list when the type
 * has no custom f_run, or to the "custom" list otherwise.  Missing
 * lcore, MSGQ or pipeline type is fatal (rte_panic).
 *
 * NOTE(review): elided extract -- the loop tail / function close are
 * not visible here, and some interior lines are missing.
 */
1731 app_init_threads(struct app_params *app)
	/* Common timestamp so all per-pipeline deadlines start aligned. */
1733 	uint64_t time = rte_get_tsc_cycles();
1736 	for (p_id = 0; p_id < app->n_pipelines; p_id++) {
1737 		struct app_pipeline_params *params =
1738 			&app->pipeline_params[p_id];
1739 		struct app_pipeline_data *data = &app->pipeline_data[p_id];
1740 		struct pipeline_type *ptype;
1741 		struct app_thread_data *t;
1742 		struct app_thread_pipeline_data *p;
1745 		lcore_id = cpu_core_map_get_lcore_id(app->core_map,
1748 			params->hyper_th_id);
1751 			rte_panic("Invalid core s%" PRIu32 "c%" PRIu32 "%s\n",
1754 				(params->hyper_th_id) ? "h" : "");
1756 		t = &app->thread_data[lcore_id];
		/* Thread timer period: APP_THREAD_TIMER_PERIOD ms in TSC cycles. */
1758 		t->timer_period = (rte_get_tsc_hz() * APP_THREAD_TIMER_PERIOD) / 1000;
1759 		t->thread_req_deadline = time + t->timer_period;
		/* Reset headroom (idle-cycle) accounting for this thread. */
1761 		t->headroom_cycles = 0;
1762 		t->headroom_time = rte_get_tsc_cycles();
1763 		t->headroom_ratio = 0.0;
1764 
1765 		t->msgq_in = app_thread_msgq_in_get(app,
1768 			params->hyper_th_id);
1769 		if (t->msgq_in == NULL)
1770 			rte_panic("Init error: Cannot find MSGQ_IN for thread %" PRId32,
1773 		t->msgq_out = app_thread_msgq_out_get(app,
1776 			params->hyper_th_id);
1777 		if (t->msgq_out == NULL)
1778 			rte_panic("Init error: Cannot find MSGQ_OUT for thread %" PRId32,
1781 		ptype = app_pipeline_type_find(app, params->type);
1783 			rte_panic("Init error: Unknown pipeline "
1784 				"type \"%s\"\n", params->type);
		/* Types without a custom f_run are driven by the generic
		 * thread loop ("regular"); others run their own loop. */
1786 		p = (ptype->be_ops->f_run == NULL) ?
1787 			&t->regular[t->n_regular] :
1788 			&t->custom[t->n_custom];
1790 		p->pipeline_id = p_id;
1792 		p->f_run = ptype->be_ops->f_run;
1793 		p->f_timer = ptype->be_ops->f_timer;
1794 		p->timer_period = data->timer_period;
1795 		p->deadline = time + data->timer_period;
1799 		if (ptype->be_ops->f_run == NULL)
/*
 * app_init() - top-level application init sequence.
 *
 * Builds the CPU core map/mask, creates the memory pools, registers
 * the CLI command sets and the built-in pipeline types, then creates
 * all pipeline instances and binds them to worker threads.  Ordering
 * matters: types must be registered before app_init_pipelines() can
 * resolve them, and pipelines must exist before app_init_threads().
 *
 * NOTE(review): elided extract -- additional init calls between the
 * visible ones (and the return) are not shown here.
 */
1806 int app_init(struct app_params *app)
1808 	app_init_core_map(app);
1809 	app_init_core_mask(app);
1812 	app_init_mempool(app);
	/* Register generic CLI commands shared by all pipeline types. */
1820 	app_pipeline_common_cmd_push(app);
1821 	app_pipeline_thread_cmd_push(app);
	/* Register the built-in pipeline types used by this application. */
1822 	app_pipeline_type_register(app, &pipeline_master);
1823 	app_pipeline_type_register(app, &pipeline_flow_classification);
1824 	app_pipeline_type_register(app, &pipeline_flow_actions);
1825 	app_pipeline_type_register(app, &pipeline_firewall);
1827 	app_init_pipelines(app);
1828 	app_init_threads(app);
/*
 * app_post_init() - second init phase, run after app_init(): gives
 * every pipeline front-end a chance to finish setup via its
 * post-init hook.
 *
 * NOTE(review): elided extract -- the return statement is not shown.
 */
1833 int app_post_init(struct app_params *app)
1835 	app_post_init_pipelines(app);
/*
 * app_pipeline_type_cmd_push() - append a pipeline type's CLI
 * commands to the application-wide command array.
 *
 * Copies ptype's cmdline_parse_ctx_t entries into app->cmds[],
 * points each copied command's user data at the app instance, and
 * re-NULL-terminates the array.
 *
 * NOTE(review): elided extract -- some original lines (argument
 * checks, early returns) are missing between visible statements.
 */
1841 app_pipeline_type_cmd_push(struct app_params *app,
1842 	struct pipeline_type *ptype)
1844 	cmdline_parse_ctx_t *cmds;
1847 	/* Check input arguments */
1848 	if ((app == NULL) ||
1852 	n_cmds = pipeline_type_cmds_count(ptype);
1856 	cmds = ptype->fe_ops->cmds;
1858 	/* Check for available slots in the application commands array */
	/* NOTE(review): if app->cmds[] has exactly APP_MAX_CMDS entries,
	 * filling it to capacity and then writing the NULL terminator at
	 * app->cmds[APP_MAX_CMDS] below would be out of bounds -- verify
	 * the array is declared with one spare slot. */
1859 	if (n_cmds > APP_MAX_CMDS - app->n_cmds)
1862 	/* Push pipeline commands into the application */
1863 	memcpy(&app->cmds[app->n_cmds],
1865 		n_cmds * sizeof(cmdline_parse_ctx_t));
	/* Each command's callback receives the app instance as context. */
1867 	for (i = 0; i < n_cmds; i++)
1868 		app->cmds[app->n_cmds + i]->data = app;
1870 	app->n_cmds += n_cmds;
1871 	app->cmds[app->n_cmds] = NULL;
/*
 * app_pipeline_type_register() - register a pipeline type with the
 * application so configuration entries can reference it by name.
 *
 * Validates the type descriptor (name present, mandatory back-end
 * f_init/f_timer hooks set), rejects duplicates, checks capacity of
 * both the type table and the CLI command array, then copies the
 * descriptor into app->pipeline_type[] and pushes its CLI commands.
 *
 * NOTE(review): elided extract -- the early-return bodies of the
 * checks below are not visible here.
 */
1877 app_pipeline_type_register(struct app_params *app, struct pipeline_type *ptype)
1881 	/* Check input arguments */
1882 	if ((app == NULL) ||
1884 		(ptype->name == NULL) ||
1885 		(strlen(ptype->name) == 0) ||
1886 		(ptype->be_ops->f_init == NULL) ||
1887 		(ptype->be_ops->f_timer == NULL))
1890 	/* Check for duplicate entry */
1891 	for (i = 0; i < app->n_pipeline_types; i++)
1892 		if (strcmp(app->pipeline_type[i].name, ptype->name) == 0)
1895 	/* Check for resource availability */
1896 	n_cmds = pipeline_type_cmds_count(ptype);
1897 	if ((app->n_pipeline_types == APP_MAX_PIPELINE_TYPES) ||
1898 		(n_cmds > APP_MAX_CMDS - app->n_cmds))
1901 	/* Copy pipeline type */
	/* Shallow copy: the descriptor's internal pointers (name, ops,
	 * cmds) are shared with the caller's static instance. */
1902 	memcpy(&app->pipeline_type[app->n_pipeline_types++],
1904 		sizeof(struct pipeline_type));
1906 	/* Copy CLI commands */
1908 	app_pipeline_type_cmd_push(app, ptype);
1914 pipeline_type *app_pipeline_type_find(struct app_params *app, char *name)
1918 for (i = 0; i < app->n_pipeline_types; i++)
1919 if (strcmp(app->pipeline_type[i].name, name) == 0)
1920 return &app->pipeline_type[i];