X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=examples%2Fip_pipeline%2Finit.c;h=f848310e8a8706c5b882a8b575ba6536bc9db452;hb=998eab3001744c7fa88d37a1a1966fdf2c00feb9;hp=0f3bf0ba8b84afef4a072b2da0d3627a0c22aaa2;hpb=947024a26df7cd91290f8ef8af63ec6210a13af4;p=dpdk.git diff --git a/examples/ip_pipeline/init.c b/examples/ip_pipeline/init.c index 0f3bf0ba8b..f848310e8a 100644 --- a/examples/ip_pipeline/init.c +++ b/examples/ip_pipeline/init.c @@ -1,39 +1,18 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation */ #include #include #include +#include +#ifdef RTE_EXEC_ENV_LINUXAPP +#include +#include +#endif +#include +#include +#include #include #include @@ -41,20 +20,24 @@ #include #include #include +#include #include "app.h" #include "pipeline.h" #include "pipeline_common_fe.h" #include "pipeline_master.h" -#include "pipeline_passthrough.h" +#include "thread_fe.h" #define APP_NAME_SIZE 32 +#define APP_RETA_SIZE_MAX (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE) + static void app_init_core_map(struct app_params *app) { APP_LOG(app, HIGH, "Initializing CPU core map ..."); - app->core_map = cpu_core_map_init(4, 32, 4, 0); + app->core_map = cpu_core_map_init(RTE_MAX_NUMA_NODES, RTE_MAX_LCORE, + 4, 0); if (app->core_map == NULL) rte_panic("Cannot create CPU core map\n"); @@ -63,11 +46,14 @@ app_init_core_map(struct app_params *app) cpu_core_map_print(app->core_map); } +/* Core Mask String in Hex Representation */ +#define APP_CORE_MASK_STRING_SIZE ((64 * APP_CORE_MASK_SIZE) / 8 * 2 + 1) + static void app_init_core_mask(struct app_params *app) { - uint64_t mask = 0; uint32_t i; + char core_mask_str[APP_CORE_MASK_STRING_SIZE]; for (i = 0; i < app->n_pipelines; i++) { struct app_pipeline_params *p = &app->pipeline_params[i]; @@ -81,24 +67,27 @@ app_init_core_mask(struct app_params *app) if (lcore_id < 0) rte_panic("Cannot create CPU core mask\n"); - mask |= 1LLU << lcore_id; + app_core_enable_in_core_mask(app, lcore_id); } - app->core_mask = mask; - APP_LOG(app, HIGH, "CPU core mask = 0x%016" PRIx64, app->core_mask); + app_core_build_core_mask_string(app, core_mask_str); + APP_LOG(app, HIGH, "CPU core mask = 0x%s", core_mask_str); } static void app_init_eal(struct app_params *app) { - char buffer[32]; + char buffer[256]; + char core_mask_str[APP_CORE_MASK_STRING_SIZE]; struct app_eal_params *p = &app->eal_params; uint32_t n_args = 0; + uint32_t i; int status; app->eal_argv[n_args++] = strdup(app->app_name); - snprintf(buffer, sizeof(buffer), "-c%" PRIx64, app->core_mask); + app_core_build_core_mask_string(app, core_mask_str); + snprintf(buffer, sizeof(buffer), "-c%s", core_mask_str); app->eal_argv[n_args++] = strdup(buffer); if (p->coremap) { @@ -127,24 +116,47 @@ app_init_eal(struct app_params *app) app->eal_argv[n_args++] = strdup(buffer); } - if (p->pci_blacklist) { + for (i = 0; i < APP_MAX_LINKS; i++) { + if (p->pci_blacklist[i] == NULL) + break; + snprintf(buffer, sizeof(buffer), "--pci-blacklist=%s", - p->pci_blacklist); + p->pci_blacklist[i]); app->eal_argv[n_args++] = strdup(buffer); } - if (p->pci_whitelist) { + if (app->port_mask != 0) + for (i = 0; i < APP_MAX_LINKS; i++) { + if (p->pci_whitelist[i] == NULL) + break; + + snprintf(buffer, + sizeof(buffer), + "--pci-whitelist=%s", + p->pci_whitelist[i]); + app->eal_argv[n_args++] = strdup(buffer); + } + else + for (i = 0; i < app->n_links; i++) { + char *pci_bdf = app->link_params[i].pci_bdf; + + snprintf(buffer, + sizeof(buffer), + "--pci-whitelist=%s", + pci_bdf); + app->eal_argv[n_args++] = strdup(buffer); + } + + for (i = 0; i < APP_MAX_LINKS; i++) { + if (p->vdev[i] == NULL) + break; + snprintf(buffer, sizeof(buffer), - "--pci-whitelist=%s", - p->pci_whitelist); - app->eal_argv[n_args++] = strdup(buffer); - } - - if (p->vdev) { - snprintf(buffer, sizeof(buffer), "--vdev=%s", p->vdev); + "--vdev=%s", + p->vdev[i]); app->eal_argv[n_args++] = strdup(buffer); } @@ -205,7 +217,7 @@ app_init_eal(struct app_params *app) } if (p->add_driver) { - 
snprintf(buffer, sizeof(buffer), "-d=%s", p->add_driver); + snprintf(buffer, sizeof(buffer), "-d%s", p->add_driver); app->eal_argv[n_args++] = strdup(buffer); } @@ -251,17 +263,21 @@ app_init_eal(struct app_params *app) app->eal_argv[n_args++] = strdup(buffer); } - if ((p->xen_dom0_present) && (p->xen_dom0)) { - snprintf(buffer, sizeof(buffer), "--xen-dom0"); - app->eal_argv[n_args++] = strdup(buffer); - } - snprintf(buffer, sizeof(buffer), "--"); app->eal_argv[n_args++] = strdup(buffer); app->eal_argc = n_args; APP_LOG(app, HIGH, "Initializing EAL ..."); + if (app->log_level >= APP_LOG_LEVEL_LOW) { + int i; + + fprintf(stdout, "[APP] EAL arguments: \""); + for (i = 1; i < app->eal_argc; i++) + fprintf(stdout, "%s ", app->eal_argv[i]); + fprintf(stdout, "\"\n"); + } + status = rte_eal_init(app->eal_argc, app->eal_argv); if (status < 0) rte_panic("EAL init error\n"); @@ -276,16 +292,14 @@ app_init_mempool(struct app_params *app) struct app_mempool_params *p = &app->mempool_params[i]; APP_LOG(app, HIGH, "Initializing %s ...", p->name); - app->mempool[i] = rte_mempool_create( - p->name, - p->pool_size, - p->buffer_size, - p->cache_size, - sizeof(struct rte_pktmbuf_pool_private), - rte_pktmbuf_pool_init, NULL, - rte_pktmbuf_init, NULL, - p->cpu_socket_id, - 0); + app->mempool[i] = rte_pktmbuf_pool_create( + p->name, + p->pool_size, + p->cache_size, + 0, /* priv_size */ + p->buffer_size - + sizeof(struct rte_mbuf), /* mbuf data size */ + p->cpu_socket_id); if (app->mempool[i] == NULL) rte_panic("%s init error\n", p->name); @@ -312,7 +326,7 @@ app_link_filter_tcp_syn_add(struct app_link_params *link) { struct rte_eth_syn_filter filter = { .hig_pri = 1, - .queue = link->tcp_syn_local_q, + .queue = link->tcp_syn_q, }; return rte_eth_dev_filter_ctrl(link->pmd_id, @@ -550,18 +564,18 @@ app_link_set_arp_filter(struct app_params *app, struct app_link_params *cp) static void app_link_set_tcp_syn_filter(struct app_params *app, struct app_link_params *cp) { - if (cp->tcp_syn_local_q != 0) { + if (cp->tcp_syn_q != 0) { int status = app_link_filter_tcp_syn_add(cp); APP_LOG(app, LOW, "%s (%" PRIu32 "): " "Adding TCP SYN filter (queue = %" PRIu32 ")", - cp->name, cp->pmd_id, cp->tcp_syn_local_q); + cp->name, cp->pmd_id, cp->tcp_syn_q); if (status) rte_panic("%s (%" PRIu32 "): " "Error adding TCP SYN filter " "(queue = %" PRIu32 ") (%" PRId32 ")\n", - cp->name, cp->pmd_id, cp->tcp_syn_local_q, + cp->name, cp->pmd_id, cp->tcp_syn_q, status); } } @@ -665,9 +679,10 @@ app_link_up_internal(struct app_params *app, struct app_link_params *cp) /* PMD link up */ status = rte_eth_dev_set_link_up(cp->pmd_id); - if (status < 0) - rte_panic("%s (%" PRIu32 "): PMD set up error %" PRId32 "\n", - cp->name, cp->pmd_id, status); + /* Do not panic if PMD does not provide link up functionality */ + if (status < 0 && status != -ENOTSUP) + rte_panic("%s (%" PRIu32 "): PMD set link up error %" + PRId32 "\n", cp->name, cp->pmd_id, status); /* Mark link as UP */ cp->state = 1; @@ -677,9 +692,14 @@ void app_link_down_internal(struct app_params *app, struct app_link_params *cp) { uint32_t i; + int status; /* PMD link down */ - rte_eth_dev_set_link_down(cp->pmd_id); + status = rte_eth_dev_set_link_down(cp->pmd_id); + /* Do not panic if PMD does not provide link down functionality */ + if (status < 0 && status != -ENOTSUP) + rte_panic("%s (%" PRIu32 "): PMD set link down error %" + PRId32 "\n", cp->name, cp->pmd_id, status); /* Mark link as DOWN */ cp->state = 0; @@ -792,7 +812,7 @@ app_check_link(struct app_params *app) 
link_params.link_speed / 1000, link_params.link_status ? "UP" : "DOWN"); - if (link_params.link_status == 0) + if (link_params.link_status == ETH_LINK_DOWN) all_links_up = 0; } @@ -800,19 +820,122 @@ app_check_link(struct app_params *app) rte_panic("Some links are DOWN\n"); } +static uint32_t +is_any_swq_frag_or_ras(struct app_params *app) +{ + uint32_t i; + + for (i = 0; i < app->n_pktq_swq; i++) { + struct app_pktq_swq_params *p = &app->swq_params[i]; + + if ((p->ipv4_frag == 1) || (p->ipv6_frag == 1) || + (p->ipv4_ras == 1) || (p->ipv6_ras == 1)) + return 1; + } + + return 0; +} + +static void +app_init_link_frag_ras(struct app_params *app) +{ + uint32_t i; + + if (is_any_swq_frag_or_ras(app)) { + for (i = 0; i < app->n_links; i++) { + struct app_link_params *p_link = &app->link_params[i]; + p_link->conf.txmode.offloads |= + DEV_TX_OFFLOAD_MULTI_SEGS; + } + } +} + +static inline int +app_get_cpu_socket_id(uint32_t pmd_id) +{ + int status = rte_eth_dev_socket_id(pmd_id); + + return (status != SOCKET_ID_ANY) ? status : 0; +} + +static inline int +app_link_rss_enabled(struct app_link_params *cp) +{ + return (cp->n_rss_qs) ? 1 : 0; +} + +static void +app_link_rss_setup(struct app_link_params *cp) +{ + struct rte_eth_dev_info dev_info; + struct rte_eth_rss_reta_entry64 reta_conf[APP_RETA_SIZE_MAX]; + uint32_t i; + int status; + + /* Get RETA size */ + memset(&dev_info, 0, sizeof(dev_info)); + rte_eth_dev_info_get(cp->pmd_id, &dev_info); + + if (dev_info.reta_size == 0) + rte_panic("%s (%u): RSS setup error (null RETA size)\n", + cp->name, cp->pmd_id); + + if (dev_info.reta_size > ETH_RSS_RETA_SIZE_512) + rte_panic("%s (%u): RSS setup error (RETA size too big)\n", + cp->name, cp->pmd_id); + + /* Setup RETA contents */ + memset(reta_conf, 0, sizeof(reta_conf)); + + for (i = 0; i < dev_info.reta_size; i++) + reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX; + + for (i = 0; i < dev_info.reta_size; i++) { + uint32_t reta_id = i / RTE_RETA_GROUP_SIZE; + uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE; + uint32_t rss_qs_pos = i % cp->n_rss_qs; + + reta_conf[reta_id].reta[reta_pos] = + (uint16_t) cp->rss_qs[rss_qs_pos]; + } + + /* RETA update */ + status = rte_eth_dev_rss_reta_update(cp->pmd_id, + reta_conf, + dev_info.reta_size); + if (status != 0) + rte_panic("%s (%u): RSS setup error (RETA update failed)\n", + cp->name, cp->pmd_id); +} + +static void +app_init_link_set_config(struct app_link_params *p) +{ + if (p->n_rss_qs) { + p->conf.rxmode.mq_mode = ETH_MQ_RX_RSS; + p->conf.rx_adv_conf.rss_conf.rss_hf = p->rss_proto_ipv4 | + p->rss_proto_ipv6 | + p->rss_proto_l2; + } +} + static void app_init_link(struct app_params *app) { uint32_t i; + app_init_link_frag_ras(app); + for (i = 0; i < app->n_links; i++) { struct app_link_params *p_link = &app->link_params[i]; + struct rte_eth_dev_info dev_info; uint32_t link_id, n_hwq_in, n_hwq_out, j; int status; sscanf(p_link->name, "LINK%" PRIu32, &link_id); n_hwq_in = app_link_get_n_rxq(app, p_link); n_hwq_out = app_link_get_n_txq(app, p_link); + app_init_link_set_config(p_link); APP_LOG(app, HIGH, "Initializing %s (%" PRIu32") " "(%" PRIu32 " RXQ, %" PRIu32 " TXQ) ...", @@ -822,6 +945,10 @@ app_init_link(struct app_params *app) n_hwq_out); /* LINK */ + rte_eth_dev_info_get(p_link->pmd_id, &dev_info); + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE) + p_link->conf.txmode.offloads |= + DEV_TX_OFFLOAD_MBUF_FAST_FREE; status = rte_eth_dev_configure( p_link->pmd_id, n_hwq_in, @@ -843,17 +970,32 @@ app_init_link(struct app_params *app) struct 
app_pktq_hwq_in_params *p_rxq = &app->hwq_in_params[j]; uint32_t rxq_link_id, rxq_queue_id; + uint16_t nb_rxd = p_rxq->size; sscanf(p_rxq->name, "RXQ%" PRIu32 ".%" PRIu32, &rxq_link_id, &rxq_queue_id); if (rxq_link_id != link_id) continue; + status = rte_eth_dev_adjust_nb_rx_tx_desc( + p_link->pmd_id, + &nb_rxd, + NULL); + if (status < 0) + rte_panic("%s (%" PRIu32 "): " + "%s adjust number of Rx descriptors " + "error (%" PRId32 ")\n", + p_link->name, + p_link->pmd_id, + p_rxq->name, + status); + + p_rxq->conf.offloads = p_link->conf.rxmode.offloads; status = rte_eth_rx_queue_setup( p_link->pmd_id, rxq_queue_id, - p_rxq->size, - rte_eth_dev_socket_id(p_link->pmd_id), + nb_rxd, + app_get_cpu_socket_id(p_link->pmd_id), &p_rxq->conf, app->mempool[p_rxq->mempool_id]); if (status < 0) @@ -870,17 +1012,32 @@ app_init_link(struct app_params *app) struct app_pktq_hwq_out_params *p_txq = &app->hwq_out_params[j]; uint32_t txq_link_id, txq_queue_id; + uint16_t nb_txd = p_txq->size; sscanf(p_txq->name, "TXQ%" PRIu32 ".%" PRIu32, &txq_link_id, &txq_queue_id); if (txq_link_id != link_id) continue; + status = rte_eth_dev_adjust_nb_rx_tx_desc( + p_link->pmd_id, + NULL, + &nb_txd); + if (status < 0) + rte_panic("%s (%" PRIu32 "): " + "%s adjust number of Tx descriptors " + "error (%" PRId32 ")\n", + p_link->name, + p_link->pmd_id, + p_txq->name, + status); + + p_txq->conf.offloads = p_link->conf.txmode.offloads; status = rte_eth_tx_queue_setup( p_link->pmd_id, txq_queue_id, - p_txq->size, - rte_eth_dev_socket_id(p_link->pmd_id), + nb_txd, + app_get_cpu_socket_id(p_link->pmd_id), &p_txq->conf); if (status < 0) rte_panic("%s (%" PRIu32 "): " @@ -897,9 +1054,13 @@ app_init_link(struct app_params *app) rte_panic("Cannot start %s (error %" PRId32 ")\n", p_link->name, status); - /* LINK UP */ + /* LINK FILTERS */ app_link_set_arp_filter(app, p_link); app_link_set_tcp_syn_filter(app, p_link); + if (app_link_rss_enabled(p_link)) + app_link_rss_setup(p_link); + + /* LINK UP */ app_link_up_internal(app, p_link); } @@ -913,13 +1074,19 @@ app_init_swq(struct app_params *app) for (i = 0; i < app->n_pktq_swq; i++) { struct app_pktq_swq_params *p = &app->swq_params[i]; + unsigned flags = 0; + + if (app_swq_get_readers(app, p) == 1) + flags |= RING_F_SC_DEQ; + if (app_swq_get_writers(app, p) == 1) + flags |= RING_F_SP_ENQ; APP_LOG(app, HIGH, "Initializing %s...", p->name); app->swq[i] = rte_ring_create( p->name, p->size, p->cpu_socket_id, - RING_F_SP_ENQ | RING_F_SC_DEQ); + flags); if (app->swq[i] == NULL) rte_panic("%s init error\n", p->name); @@ -946,7 +1113,7 @@ app_init_tm(struct app_params *app) /* TM */ p_tm->sched_port_params.name = p_tm->name; p_tm->sched_port_params.socket = - rte_eth_dev_socket_id(p_link->pmd_id); + app_get_cpu_socket_id(p_link->pmd_id); p_tm->sched_port_params.rate = (uint64_t) link_eth_params.link_speed * 1000 * 1000 / 8; @@ -998,6 +1165,149 @@ app_init_tm(struct app_params *app) } } +#ifndef RTE_EXEC_ENV_LINUXAPP +static void +app_init_tap(struct app_params *app) { + if (app->n_pktq_tap == 0) + return; + + rte_panic("TAP device not supported.\n"); +} +#else +static void +app_init_tap(struct app_params *app) +{ + uint32_t i; + + for (i = 0; i < app->n_pktq_tap; i++) { + struct app_pktq_tap_params *p_tap = &app->tap_params[i]; + struct ifreq ifr; + int fd, status; + + APP_LOG(app, HIGH, "Initializing %s ...", p_tap->name); + + fd = open("/dev/net/tun", O_RDWR | O_NONBLOCK); + if (fd < 0) + rte_panic("Cannot open file /dev/net/tun\n"); + + memset(&ifr, 0, sizeof(ifr)); + ifr.ifr_flags = IFF_TAP 
| IFF_NO_PI; /* No packet information */ + snprintf(ifr.ifr_name, IFNAMSIZ, "%s", p_tap->name); + + status = ioctl(fd, TUNSETIFF, (void *) &ifr); + if (status < 0) + rte_panic("TAP setup error\n"); + + app->tap[i] = fd; + } +} +#endif + +#ifdef RTE_LIBRTE_KNI +static int +kni_config_network_interface(uint16_t port_id, uint8_t if_up) { + int ret = 0; + + if (port_id >= rte_eth_dev_count()) + return -EINVAL; + + ret = (if_up) ? + rte_eth_dev_set_link_up(port_id) : + rte_eth_dev_set_link_down(port_id); + + return ret; +} + +static int +kni_change_mtu(uint16_t port_id, unsigned int new_mtu) { + int ret; + + if (port_id >= rte_eth_dev_count()) + return -EINVAL; + + if (new_mtu > ETHER_MAX_LEN) + return -EINVAL; + + /* Set new MTU */ + ret = rte_eth_dev_set_mtu(port_id, new_mtu); + if (ret < 0) + return ret; + + return 0; +} +#endif /* RTE_LIBRTE_KNI */ + +#ifndef RTE_LIBRTE_KNI +static void +app_init_kni(struct app_params *app) { + if (app->n_pktq_kni == 0) + return; + + rte_panic("Can not init KNI without librte_kni support.\n"); +} +#else +static void +app_init_kni(struct app_params *app) { + uint32_t i; + + if (app->n_pktq_kni == 0) + return; + + rte_kni_init(app->n_pktq_kni); + + for (i = 0; i < app->n_pktq_kni; i++) { + struct app_pktq_kni_params *p_kni = &app->kni_params[i]; + struct app_link_params *p_link; + struct rte_eth_dev_info dev_info; + struct app_mempool_params *mempool_params; + struct rte_mempool *mempool; + struct rte_kni_conf conf; + struct rte_kni_ops ops; + + /* LINK */ + p_link = app_get_link_for_kni(app, p_kni); + memset(&dev_info, 0, sizeof(dev_info)); + rte_eth_dev_info_get(p_link->pmd_id, &dev_info); + + /* MEMPOOL */ + mempool_params = &app->mempool_params[p_kni->mempool_id]; + mempool = app->mempool[p_kni->mempool_id]; + + /* KNI */ + memset(&conf, 0, sizeof(conf)); + snprintf(conf.name, RTE_KNI_NAMESIZE, "%s", p_kni->name); + conf.force_bind = p_kni->force_bind; + if (conf.force_bind) { + int lcore_id; + + lcore_id = cpu_core_map_get_lcore_id(app->core_map, + p_kni->socket_id, + p_kni->core_id, + p_kni->hyper_th_id); + + if (lcore_id < 0) + rte_panic("%s invalid CPU core\n", p_kni->name); + + conf.core_id = (uint32_t) lcore_id; + } + conf.group_id = p_link->pmd_id; + conf.mbuf_size = mempool_params->buffer_size; + conf.addr = dev_info.pci_dev->addr; + conf.id = dev_info.pci_dev->id; + + memset(&ops, 0, sizeof(ops)); + ops.port_id = (uint8_t) p_link->pmd_id; + ops.change_mtu = kni_change_mtu; + ops.config_network_if = kni_config_network_interface; + + APP_LOG(app, HIGH, "Initializing %s ...", p_kni->name); + app->kni[i] = rte_kni_alloc(mempool, &conf, &ops); + if (!app->kni[i]) + rte_panic("%s init error\n", p_kni->name); + } +} +#endif /* RTE_LIBRTE_KNI */ + static void app_init_msgq(struct app_params *app) { @@ -1018,13 +1328,15 @@ app_init_msgq(struct app_params *app) } } -static void app_pipeline_params_get(struct app_params *app, +void app_pipeline_params_get(struct app_params *app, struct app_pipeline_params *p_in, struct pipeline_params *p_out) { uint32_t i; - strcpy(p_out->name, p_in->name); + snprintf(p_out->name, PIPELINE_NAME_SIZE, "%s", p_in->name); + + snprintf(p_out->type, PIPELINE_TYPE_SIZE, "%s", p_in->type); p_out->socket_id = (int) p_in->socket_id; @@ -1056,21 +1368,98 @@ static void app_pipeline_params_get(struct app_params *app, break; } case APP_PKTQ_IN_SWQ: - out->type = PIPELINE_PORT_IN_RING_READER; - out->params.ring.ring = app->swq[in->id]; - out->burst_size = app->swq_params[in->id].burst_read; - /* What about frag and ras ports? 
*/ + { + struct app_pktq_swq_params *swq_params = &app->swq_params[in->id]; + + if ((swq_params->ipv4_frag == 0) && (swq_params->ipv6_frag == 0)) { + if (app_swq_get_readers(app, swq_params) == 1) { + out->type = PIPELINE_PORT_IN_RING_READER; + out->params.ring.ring = app->swq[in->id]; + out->burst_size = app->swq_params[in->id].burst_read; + } else { + out->type = PIPELINE_PORT_IN_RING_MULTI_READER; + out->params.ring_multi.ring = app->swq[in->id]; + out->burst_size = swq_params->burst_read; + } + } else { + if (swq_params->ipv4_frag == 1) { + struct rte_port_ring_reader_ipv4_frag_params *params = + &out->params.ring_ipv4_frag; + + out->type = PIPELINE_PORT_IN_RING_READER_IPV4_FRAG; + params->ring = app->swq[in->id]; + params->mtu = swq_params->mtu; + params->metadata_size = swq_params->metadata_size; + params->pool_direct = + app->mempool[swq_params->mempool_direct_id]; + params->pool_indirect = + app->mempool[swq_params->mempool_indirect_id]; + out->burst_size = swq_params->burst_read; + } else { + struct rte_port_ring_reader_ipv6_frag_params *params = + &out->params.ring_ipv6_frag; + + out->type = PIPELINE_PORT_IN_RING_READER_IPV6_FRAG; + params->ring = app->swq[in->id]; + params->mtu = swq_params->mtu; + params->metadata_size = swq_params->metadata_size; + params->pool_direct = + app->mempool[swq_params->mempool_direct_id]; + params->pool_indirect = + app->mempool[swq_params->mempool_indirect_id]; + out->burst_size = swq_params->burst_read; + } + } break; + } case APP_PKTQ_IN_TM: + { out->type = PIPELINE_PORT_IN_SCHED_READER; out->params.sched.sched = app->tm[in->id]; out->burst_size = app->tm_params[in->id].burst_read; break; + } +#ifdef RTE_EXEC_ENV_LINUXAPP + case APP_PKTQ_IN_TAP: + { + struct app_pktq_tap_params *tap_params = + &app->tap_params[in->id]; + struct app_mempool_params *mempool_params = + &app->mempool_params[tap_params->mempool_id]; + struct rte_mempool *mempool = + app->mempool[tap_params->mempool_id]; + + out->type = PIPELINE_PORT_IN_FD_READER; + out->params.fd.fd = app->tap[in->id]; + out->params.fd.mtu = mempool_params->buffer_size; + out->params.fd.mempool = mempool; + out->burst_size = app->tap_params[in->id].burst_read; + break; + } +#endif +#ifdef RTE_LIBRTE_KNI + case APP_PKTQ_IN_KNI: + { + out->type = PIPELINE_PORT_IN_KNI_READER; + out->params.kni.kni = app->kni[in->id]; + out->burst_size = app->kni_params[in->id].burst_read; + break; + } +#endif /* RTE_LIBRTE_KNI */ case APP_PKTQ_IN_SOURCE: + { + uint32_t mempool_id = + app->source_params[in->id].mempool_id; + out->type = PIPELINE_PORT_IN_SOURCE; - out->params.source.mempool = app->mempool[in->id]; + out->params.source.mempool = app->mempool[mempool_id]; out->burst_size = app->source_params[in->id].burst; + out->params.source.file_name = + app->source_params[in->id].file_name; + out->params.source.n_bytes_per_pkt = + app->source_params[in->id].n_bytes_per_pkt; break; + } default: break; } @@ -1119,29 +1508,70 @@ static void app_pipeline_params_get(struct app_params *app, break; } case APP_PKTQ_OUT_SWQ: - if (app->swq_params[in->id].dropless == 0) { - struct rte_port_ring_writer_params *params = - &out->params.ring; - - out->type = PIPELINE_PORT_OUT_RING_WRITER; - params->ring = app->swq[in->id]; - params->tx_burst_sz = - app->swq_params[in->id].burst_write; + { + struct app_pktq_swq_params *swq_params = &app->swq_params[in->id]; + + if ((swq_params->ipv4_ras == 0) && (swq_params->ipv6_ras == 0)) { + if (app_swq_get_writers(app, swq_params) == 1) { + if (app->swq_params[in->id].dropless == 0) { + struct 
rte_port_ring_writer_params *params = + &out->params.ring; + + out->type = PIPELINE_PORT_OUT_RING_WRITER; + params->ring = app->swq[in->id]; + params->tx_burst_sz = + app->swq_params[in->id].burst_write; + } else { + struct rte_port_ring_writer_nodrop_params + *params = &out->params.ring_nodrop; + + out->type = + PIPELINE_PORT_OUT_RING_WRITER_NODROP; + params->ring = app->swq[in->id]; + params->tx_burst_sz = + app->swq_params[in->id].burst_write; + params->n_retries = + app->swq_params[in->id].n_retries; + } + } else { + if (swq_params->dropless == 0) { + struct rte_port_ring_multi_writer_params *params = + &out->params.ring_multi; + + out->type = PIPELINE_PORT_OUT_RING_MULTI_WRITER; + params->ring = app->swq[in->id]; + params->tx_burst_sz = swq_params->burst_write; + } else { + struct rte_port_ring_multi_writer_nodrop_params + *params = &out->params.ring_multi_nodrop; + + out->type = PIPELINE_PORT_OUT_RING_MULTI_WRITER_NODROP; + params->ring = app->swq[in->id]; + params->tx_burst_sz = swq_params->burst_write; + params->n_retries = swq_params->n_retries; + } + } } else { - struct rte_port_ring_writer_nodrop_params - *params = &out->params.ring_nodrop; - - out->type = - PIPELINE_PORT_OUT_RING_WRITER_NODROP; - params->ring = app->swq[in->id]; - params->tx_burst_sz = - app->swq_params[in->id].burst_write; - params->n_retries = - app->swq_params[in->id].n_retries; + if (swq_params->ipv4_ras == 1) { + struct rte_port_ring_writer_ipv4_ras_params *params = + &out->params.ring_ipv4_ras; + + out->type = PIPELINE_PORT_OUT_RING_WRITER_IPV4_RAS; + params->ring = app->swq[in->id]; + params->tx_burst_sz = swq_params->burst_write; + } else { + struct rte_port_ring_writer_ipv6_ras_params *params = + &out->params.ring_ipv6_ras; + + out->type = PIPELINE_PORT_OUT_RING_WRITER_IPV6_RAS; + params->ring = app->swq[in->id]; + params->tx_burst_sz = swq_params->burst_write; + } } - /* What about frag and ras ports? */ break; - case APP_PKTQ_OUT_TM: { + } + case APP_PKTQ_OUT_TM: + { struct rte_port_sched_writer_params *params = &out->params.sched; @@ -1151,9 +1581,58 @@ static void app_pipeline_params_get(struct app_params *app, app->tm_params[in->id].burst_write; break; } +#ifdef RTE_EXEC_ENV_LINUXAPP + case APP_PKTQ_OUT_TAP: + { + struct rte_port_fd_writer_params *params = + &out->params.fd; + + out->type = PIPELINE_PORT_OUT_FD_WRITER; + params->fd = app->tap[in->id]; + params->tx_burst_sz = + app->tap_params[in->id].burst_write; + break; + } +#endif +#ifdef RTE_LIBRTE_KNI + case APP_PKTQ_OUT_KNI: + { + struct app_pktq_kni_params *p_kni = + &app->kni_params[in->id]; + + if (p_kni->dropless == 0) { + struct rte_port_kni_writer_params *params = + &out->params.kni; + + out->type = PIPELINE_PORT_OUT_KNI_WRITER; + params->kni = app->kni[in->id]; + params->tx_burst_sz = + app->kni_params[in->id].burst_write; + } else { + struct rte_port_kni_writer_nodrop_params + *params = &out->params.kni_nodrop; + + out->type = PIPELINE_PORT_OUT_KNI_WRITER_NODROP; + params->kni = app->kni[in->id]; + params->tx_burst_sz = + app->kni_params[in->id].burst_write; + params->n_retries = + app->kni_params[in->id].n_retries; + } + break; + } +#endif /* RTE_LIBRTE_KNI */ case APP_PKTQ_OUT_SINK: + { out->type = PIPELINE_PORT_OUT_SINK; + out->params.sink.file_name = + app->sink_params[in->id].file_name; + out->params.sink.max_n_pkts = + app->sink_params[in->id]. 
+ n_pkts_to_dump; + break; + } default: break; } @@ -1217,8 +1696,31 @@ app_init_pipelines(struct app_params *app) "init error\n", params->name); } - data->timer_period = (rte_get_tsc_hz() * params->timer_period) - / 1000; + data->ptype = ptype; + + data->timer_period = (rte_get_tsc_hz() * + params->timer_period) / 1000; + } +} + +static void +app_post_init_pipelines(struct app_params *app) +{ + uint32_t p_id; + + for (p_id = 0; p_id < app->n_pipelines; p_id++) { + struct app_pipeline_params *params = + &app->pipeline_params[p_id]; + struct app_pipeline_data *data = &app->pipeline_data[p_id]; + int status; + + if (data->ptype->fe_ops->f_post_init == NULL) + continue; + + status = data->ptype->fe_ops->f_post_init(data->fe); + if (status) + rte_panic("Pipeline instance \"%s\" front-end " + "post-init error\n", params->name); } } @@ -1250,6 +1752,29 @@ app_init_threads(struct app_params *app) t = &app->thread_data[lcore_id]; + t->timer_period = (rte_get_tsc_hz() * APP_THREAD_TIMER_PERIOD) / 1000; + t->thread_req_deadline = time + t->timer_period; + + t->headroom_cycles = 0; + t->headroom_time = rte_get_tsc_cycles(); + t->headroom_ratio = 0.0; + + t->msgq_in = app_thread_msgq_in_get(app, + params->socket_id, + params->core_id, + params->hyper_th_id); + if (t->msgq_in == NULL) + rte_panic("Init error: Cannot find MSGQ_IN for thread %" PRId32, + lcore_id); + + t->msgq_out = app_thread_msgq_out_get(app, + params->socket_id, + params->core_id, + params->hyper_th_id); + if (t->msgq_out == NULL) + rte_panic("Init error: Cannot find MSGQ_OUT for thread %" PRId32, + lcore_id); + ptype = app_pipeline_type_find(app, params->type); if (ptype == NULL) rte_panic("Init error: Unknown pipeline " @@ -1259,12 +1784,15 @@ app_init_threads(struct app_params *app) &t->regular[t->n_regular] : &t->custom[t->n_custom]; + p->pipeline_id = p_id; p->be = data->be; p->f_run = ptype->be_ops->f_run; p->f_timer = ptype->be_ops->f_timer; p->timer_period = data->timer_period; p->deadline = time + data->timer_period; + data->enabled = 1; + if (ptype->be_ops->f_run == NULL) t->n_regular++; else @@ -1282,11 +1810,13 @@ int app_init(struct app_params *app) app_init_link(app); app_init_swq(app); app_init_tm(app); + app_init_tap(app); + app_init_kni(app); app_init_msgq(app); app_pipeline_common_cmd_push(app); + app_pipeline_thread_cmd_push(app); app_pipeline_type_register(app, &pipeline_master); - app_pipeline_type_register(app, &pipeline_passthrough); app_init_pipelines(app); app_init_threads(app); @@ -1294,6 +1824,13 @@ int app_init(struct app_params *app) return 0; } +int app_post_init(struct app_params *app) +{ + app_post_init_pipelines(app); + + return 0; +} + static int app_pipeline_type_cmd_push(struct app_params *app, struct pipeline_type *ptype) @@ -1319,7 +1856,7 @@ app_pipeline_type_cmd_push(struct app_params *app, /* Push pipeline commands into the application */ memcpy(&app->cmds[app->n_cmds], cmds, - n_cmds * sizeof(cmdline_parse_ctx_t *)); + n_cmds * sizeof(cmdline_parse_ctx_t)); for (i = 0; i < n_cmds; i++) app->cmds[app->n_cmds + i]->data = app;
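
Editor's note: the patch above migrates the mempool creation to rte_pktmbuf_pool_create(), clamps descriptor counts with rte_eth_dev_adjust_nb_rx_tx_desc(), and copies port-level offloads into the per-queue configuration. The following is a minimal, self-contained sketch of that same setup pattern, not code from the patch: the port number 0, the pool name "MBUF_POOL", the sizes 8192/256/512, and the function name port_init_sketch are illustrative assumptions only.

#include <string.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>

/* Illustrative sketch: one RX and one TX queue on an assumed port 0. */
static int
port_init_sketch(void)
{
	uint16_t port_id = 0;
	uint16_t nb_rxd = 512, nb_txd = 512;
	struct rte_eth_conf port_conf;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxq_conf;
	struct rte_eth_txconf txq_conf;
	struct rte_mempool *mp;
	int socket_id, status;

	/* Packet mbuf pool via the dedicated helper, replacing the older
	 * rte_mempool_create() + rte_pktmbuf_pool_init() combination. */
	mp = rte_pktmbuf_pool_create("MBUF_POOL", 8192, 256,
		0 /* priv_size */, RTE_MBUF_DEFAULT_BUF_SIZE,
		rte_socket_id());
	if (mp == NULL)
		return -1;

	/* Fall back to socket 0 when the device reports SOCKET_ID_ANY,
	 * as the patch does in app_get_cpu_socket_id(). */
	socket_id = rte_eth_dev_socket_id(port_id);
	if (socket_id == SOCKET_ID_ANY)
		socket_id = 0;

	memset(&port_conf, 0, sizeof(port_conf));
	rte_eth_dev_info_get(port_id, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	status = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (status < 0)
		return status;

	/* Let the PMD clamp the descriptor counts to what it supports. */
	status = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
	if (status < 0)
		return status;

	/* Per-queue offloads are copied from the port-level configuration. */
	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = port_conf.rxmode.offloads;
	txq_conf = dev_info.default_txconf;
	txq_conf.offloads = port_conf.txmode.offloads;

	status = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket_id,
		&rxq_conf, mp);
	if (status < 0)
		return status;

	status = rte_eth_tx_queue_setup(port_id, 0, nb_txd, socket_id,
		&txq_conf);
	if (status < 0)
		return status;

	return rte_eth_dev_start(port_id);
}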