DIRS-$(CONFIG_RTE_APP_TEST) += test
DIRS-$(CONFIG_RTE_LIBRTE_ACL) += test-acl
+DIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += test-pipeline
DIRS-$(CONFIG_RTE_TEST_PMD) += test-pmd
DIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_test
DIRS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += dump_cfg
--- /dev/null
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# application name
+#
+APP = testpipeline
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_PCAP),y)
+LDFLAGS += -lpcap
+endif
+
+#
+# all sources are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) := main.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += config.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += init.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += runtime.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_stub.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_hash.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_lpm.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_lpm_ipv6.c
+
+# include ACL lib if available
+ifeq ($(CONFIG_RTE_LIBRTE_ACL),y)
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_acl.c
+endif
+
+# this application needs libraries first
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += lib
+
+include $(RTE_SDK)/mk/rte.app.mk
--- /dev/null
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <getopt.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_lpm.h>
+#include <rte_lpm6.h>
+#include <rte_string_fns.h>
+
+#include "main.h"
+
+struct app_params app;
+
+static const char usage[] = "\n";
+
+void
+app_print_usage(void)
+{
+ printf(usage);
+}
+
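+/*
+ * Parse the hexadecimal port mask (e.g. "-p 0x3") into app.ports[].
+ * The number of selected ports must be a power of two and no larger
+ * than APP_MAX_PORTS.
+ */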
+static int
+app_parse_port_mask(const char *arg)
+{
+ char *end = NULL;
+ uint64_t port_mask;
+ uint32_t i;
+
+ if (arg[0] == '\0')
+ return -1;
+
+ port_mask = strtoul(arg, &end, 16);
+ if ((end == NULL) || (*end != '\0'))
+ return -2;
+
+ if (port_mask == 0)
+ return -3;
+
+ app.n_ports = 0;
+ for (i = 0; i < 64; i++) {
+ if ((port_mask & (1LLU << i)) == 0)
+ continue;
+
+ if (app.n_ports >= APP_MAX_PORTS)
+ return -4;
+
+ app.ports[app.n_ports] = i;
+ app.n_ports++;
+ }
+
+ if (!rte_is_power_of_2(app.n_ports))
+ return -5;
+
+ return 0;
+}
+
+struct {
+ const char *name;
+ uint32_t value;
+} app_args_table[] = {
+ {"none", e_APP_PIPELINE_NONE},
+ {"stub", e_APP_PIPELINE_STUB},
+ {"hash-8-ext", e_APP_PIPELINE_HASH_KEY8_EXT},
+ {"hash-8-lru", e_APP_PIPELINE_HASH_KEY8_LRU},
+ {"hash-16-ext", e_APP_PIPELINE_HASH_KEY16_EXT},
+ {"hash-16-lru", e_APP_PIPELINE_HASH_KEY16_LRU},
+ {"hash-32-ext", e_APP_PIPELINE_HASH_KEY32_EXT},
+ {"hash-32-lru", e_APP_PIPELINE_HASH_KEY32_LRU},
+ {"hash-spec-8-ext", e_APP_PIPELINE_HASH_SPEC_KEY8_EXT},
+ {"hash-spec-8-lru", e_APP_PIPELINE_HASH_SPEC_KEY8_LRU},
+ {"hash-spec-16-ext", e_APP_PIPELINE_HASH_SPEC_KEY16_EXT},
+ {"hash-spec-16-lru", e_APP_PIPELINE_HASH_SPEC_KEY16_LRU},
+ {"hash-spec-32-ext", e_APP_PIPELINE_HASH_SPEC_KEY32_EXT},
+ {"hash-spec-32-lru", e_APP_PIPELINE_HASH_SPEC_KEY32_LRU},
+ {"acl", e_APP_PIPELINE_ACL},
+ {"lpm", e_APP_PIPELINE_LPM},
+ {"lpm-ipv6", e_APP_PIPELINE_LPM_IPV6},
+};
+
+int
+app_parse_args(int argc, char **argv)
+{
+ int opt, ret;
+ char **argvopt;
+ int option_index;
+ char *prgname = argv[0];
+ static struct option lgopts[] = {
+ {"none", 0, 0, 0},
+ {"stub", 0, 0, 0},
+ {"hash-8-ext", 0, 0, 0},
+ {"hash-8-lru", 0, 0, 0},
+ {"hash-16-ext", 0, 0, 0},
+ {"hash-16-lru", 0, 0, 0},
+ {"hash-32-ext", 0, 0, 0},
+ {"hash-32-lru", 0, 0, 0},
+ {"hash-spec-8-ext", 0, 0, 0},
+ {"hash-spec-8-lru", 0, 0, 0},
+ {"hash-spec-16-ext", 0, 0, 0},
+ {"hash-spec-16-lru", 0, 0, 0},
+ {"hash-spec-32-ext", 0, 0, 0},
+ {"hash-spec-32-lru", 0, 0, 0},
+ {"acl", 0, 0, 0},
+ {"lpm", 0, 0, 0},
+ {"lpm-ipv6", 0, 0, 0},
+ {NULL, 0, 0, 0}
+ };
+ uint32_t lcores[3], n_lcores, lcore_id, pipeline_type_provided;
+
+ /* EAL args */
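+	/* Exactly three lcores are required: one RX, one worker, one TX */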
+ n_lcores = 0;
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ if (rte_lcore_is_enabled(lcore_id) == 0)
+ continue;
+
+ if (n_lcores >= 3) {
+ RTE_LOG(ERR, USER1, "Number of cores must be 3\n");
+ app_print_usage();
+ return -1;
+ }
+
+ lcores[n_lcores] = lcore_id;
+ n_lcores++;
+ }
+
+ if (n_lcores != 3) {
+ RTE_LOG(ERR, USER1, "Number of cores must be 3\n");
+ app_print_usage();
+ return -1;
+ }
+
+ app.core_rx = lcores[0];
+ app.core_worker = lcores[1];
+ app.core_tx = lcores[2];
+
+ /* Non-EAL args */
+ argvopt = argv;
+
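+	/* Default pipeline type when none is given on the command line */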
+ app.pipeline_type = e_APP_PIPELINE_HASH_KEY16_LRU;
+ pipeline_type_provided = 0;
+
+ while ((opt = getopt_long(argc, argvopt, "p:",
+ lgopts, &option_index)) != EOF) {
+ switch (opt) {
+ case 'p':
+ if (app_parse_port_mask(optarg) < 0) {
+ app_print_usage();
+ return -1;
+ }
+ break;
+
+ case 0: /* long options */
+ if (!pipeline_type_provided) {
+ uint32_t i;
+
+ for (i = 0; i < e_APP_PIPELINES; i++) {
+ if (!strcmp(lgopts[option_index].name,
+ app_args_table[i].name)) {
+ app.pipeline_type =
+ app_args_table[i].value;
+ pipeline_type_provided = 1;
+ break;
+ }
+ }
+
+ break;
+ }
+
+ app_print_usage();
+ return -1;
+
+ default:
+ return -1;
+ }
+ }
+
+ if (optind >= 0)
+ argv[optind - 1] = prgname;
+
+ ret = optind - 1;
+ optind = 0; /* reset getopt lib */
+ return ret;
+}
--- /dev/null
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <getopt.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_string_fns.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_lpm.h>
+#include <rte_lpm6.h>
+
+#include "main.h"
+
+struct app_params app = {
+	/* Ports */
+ .n_ports = APP_MAX_PORTS,
+ .port_rx_ring_size = 128,
+ .port_tx_ring_size = 512,
+
+ /* Rings */
+ .ring_rx_size = 128,
+ .ring_tx_size = 128,
+
+ /* Buffer pool */
+ .pool_buffer_size = 2048 + sizeof(struct rte_mbuf) +
+ RTE_PKTMBUF_HEADROOM,
+ .pool_size = 32 * 1024,
+ .pool_cache_size = 256,
+
+ /* Burst sizes */
+ .burst_size_rx_read = 64,
+ .burst_size_rx_write = 64,
+ .burst_size_worker_read = 64,
+ .burst_size_worker_write = 64,
+ .burst_size_tx_read = 64,
+ .burst_size_tx_write = 64,
+};
+
+static struct rte_eth_conf port_conf = {
+ .rxmode = {
+ .split_hdr_size = 0,
+ .header_split = 0, /* Header Split disabled */
+ .hw_ip_checksum = 1, /* IP checksum offload enabled */
+ .hw_vlan_filter = 0, /* VLAN filtering disabled */
+ .jumbo_frame = 0, /* Jumbo Frame Support disabled */
+		.hw_strip_crc   = 0, /* CRC stripping by hardware disabled */
+ },
+ .rx_adv_conf = {
+ .rss_conf = {
+ .rss_key = NULL,
+ .rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6,
+ },
+ },
+ .txmode = {
+ .mq_mode = ETH_MQ_TX_NONE,
+ },
+};
+
+static struct rte_eth_rxconf rx_conf = {
+ .rx_thresh = {
+ .pthresh = 8,
+ .hthresh = 8,
+ .wthresh = 4,
+ },
+ .rx_free_thresh = 64,
+ .rx_drop_en = 0,
+};
+
+static struct rte_eth_txconf tx_conf = {
+ .tx_thresh = {
+ .pthresh = 36,
+ .hthresh = 0,
+ .wthresh = 0,
+ },
+ .tx_free_thresh = 0,
+ .tx_rs_thresh = 0,
+};
+
+static void
+app_init_mbuf_pools(void)
+{
+ /* Init the buffer pool */
+ RTE_LOG(INFO, USER1, "Creating the mbuf pool ...\n");
+ app.pool = rte_mempool_create(
+ "mempool",
+ app.pool_size,
+ app.pool_buffer_size,
+ app.pool_cache_size,
+ sizeof(struct rte_pktmbuf_pool_private),
+ rte_pktmbuf_pool_init, NULL,
+ rte_pktmbuf_init, NULL,
+ rte_socket_id(),
+ 0);
+ if (app.pool == NULL)
+ rte_panic("Cannot create mbuf pool\n");
+}
+
+static void
+app_init_rings(void)
+{
+ uint32_t i;
+
+ for (i = 0; i < app.n_ports; i++) {
+ char name[32];
+
+ rte_snprintf(name, sizeof(name), "app_ring_rx_%u", i);
+
+ app.rings_rx[i] = rte_ring_create(
+ name,
+ app.ring_rx_size,
+ rte_socket_id(),
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+
+ if (app.rings_rx[i] == NULL)
+ rte_panic("Cannot create RX ring %u\n", i);
+ }
+
+ for (i = 0; i < app.n_ports; i++) {
+ char name[32];
+
+ rte_snprintf(name, sizeof(name), "app_ring_tx_%u", i);
+
+ app.rings_tx[i] = rte_ring_create(
+ name,
+ app.ring_tx_size,
+ rte_socket_id(),
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+
+ if (app.rings_tx[i] == NULL)
+ rte_panic("Cannot create TX ring %u\n", i);
+ }
+}
+
+static void
+app_ports_check_link(void)
+{
+ uint32_t all_ports_up, i;
+
+ all_ports_up = 1;
+
+ for (i = 0; i < app.n_ports; i++) {
+ struct rte_eth_link link;
+ uint8_t port;
+
+ port = (uint8_t) app.ports[i];
+ memset(&link, 0, sizeof(link));
+ rte_eth_link_get_nowait(port, &link);
+ RTE_LOG(INFO, USER1, "Port %u (%u Gbps) %s\n",
+ port,
+ link.link_speed / 1000,
+ link.link_status ? "UP" : "DOWN");
+
+ if (link.link_status == 0)
+ all_ports_up = 0;
+ }
+
+ if (all_ports_up == 0)
+ rte_panic("Some NIC ports are DOWN\n");
+}
+
+static void
+app_init_ports(void)
+{
+ uint32_t i;
+
+ /* Init driver */
+ RTE_LOG(INFO, USER1, "Initializing the PMD driver ...\n");
+ if (rte_eal_pci_probe() < 0)
+ rte_panic("Cannot probe PCI\n");
+
+ /* Init NIC ports, then start the ports */
+ for (i = 0; i < app.n_ports; i++) {
+ uint8_t port;
+ int ret;
+
+ port = (uint8_t) app.ports[i];
+ RTE_LOG(INFO, USER1, "Initializing NIC port %u ...\n", port);
+
+ /* Init port */
+ ret = rte_eth_dev_configure(
+ port,
+ 1,
+ 1,
+ &port_conf);
+ if (ret < 0)
+ rte_panic("Cannot init NIC port %u (%d)\n", port, ret);
+
+ rte_eth_promiscuous_enable(port);
+
+ /* Init RX queues */
+ ret = rte_eth_rx_queue_setup(
+ port,
+ 0,
+ app.port_rx_ring_size,
+ rte_eth_dev_socket_id(port),
+ &rx_conf,
+ app.pool);
+ if (ret < 0)
+ rte_panic("Cannot init RX for port %u (%d)\n",
+ (uint32_t) port, ret);
+
+ /* Init TX queues */
+ ret = rte_eth_tx_queue_setup(
+ port,
+ 0,
+ app.port_tx_ring_size,
+ rte_eth_dev_socket_id(port),
+ &tx_conf);
+ if (ret < 0)
+ rte_panic("Cannot init TX for port %u (%d)\n",
+ (uint32_t) port, ret);
+
+ /* Start port */
+ ret = rte_eth_dev_start(port);
+ if (ret < 0)
+ rte_panic("Cannot start port %u (%d)\n", port, ret);
+ }
+
+ app_ports_check_link();
+}
+
+void
+app_init(void)
+{
+ app_init_mbuf_pools();
+ app_init_rings();
+ app_init_ports();
+
+ RTE_LOG(INFO, USER1, "Initialization completed\n");
+}
--- /dev/null
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <getopt.h>
+#include <unistd.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_lpm.h>
+#include <rte_lpm6.h>
+
+#include "main.h"
+
+int
+MAIN(int argc, char **argv)
+{
+ uint32_t lcore;
+ int ret;
+
+ /* Init EAL */
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ return -1;
+ argc -= ret;
+ argv += ret;
+
+ /* Parse application arguments (after the EAL ones) */
+ ret = app_parse_args(argc, argv);
+ if (ret < 0) {
+ app_print_usage();
+ return -1;
+ }
+
+ /* Init */
+ app_init();
+
+ /* Launch per-lcore init on every lcore */
+ rte_eal_mp_remote_launch(app_lcore_main_loop, NULL, CALL_MASTER);
+ RTE_LCORE_FOREACH_SLAVE(lcore) {
+ if (rte_eal_wait_lcore(lcore) < 0)
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+app_lcore_main_loop(__attribute__((unused)) void *arg)
+{
+ unsigned lcore;
+
+ lcore = rte_lcore_id();
+
+ if (lcore == app.core_rx) {
+ switch (app.pipeline_type) {
+ case e_APP_PIPELINE_ACL:
+ app_main_loop_rx();
+ return 0;
+
+ default:
+ app_main_loop_rx_metadata();
+ return 0;
+ }
+ }
+
+ if (lcore == app.core_worker) {
+ switch (app.pipeline_type) {
+ case e_APP_PIPELINE_STUB:
+ app_main_loop_worker_pipeline_stub();
+ return 0;
+
+ case e_APP_PIPELINE_HASH_KEY8_EXT:
+ case e_APP_PIPELINE_HASH_KEY8_LRU:
+ case e_APP_PIPELINE_HASH_KEY16_EXT:
+ case e_APP_PIPELINE_HASH_KEY16_LRU:
+ case e_APP_PIPELINE_HASH_KEY32_EXT:
+ case e_APP_PIPELINE_HASH_KEY32_LRU:
+ case e_APP_PIPELINE_HASH_SPEC_KEY8_EXT:
+ case e_APP_PIPELINE_HASH_SPEC_KEY8_LRU:
+ case e_APP_PIPELINE_HASH_SPEC_KEY16_EXT:
+ case e_APP_PIPELINE_HASH_SPEC_KEY16_LRU:
+ case e_APP_PIPELINE_HASH_SPEC_KEY32_EXT:
+ case e_APP_PIPELINE_HASH_SPEC_KEY32_LRU:
+ app_main_loop_worker_pipeline_hash();
+ return 0;
+
+ case e_APP_PIPELINE_ACL:
+#ifndef RTE_LIBRTE_ACL
+ rte_exit(EXIT_FAILURE, "ACL not present in build\n");
+#else
+ app_main_loop_worker_pipeline_acl();
+ return 0;
+#endif
+
+ case e_APP_PIPELINE_LPM:
+ app_main_loop_worker_pipeline_lpm();
+ return 0;
+
+ case e_APP_PIPELINE_LPM_IPV6:
+ app_main_loop_worker_pipeline_lpm_ipv6();
+ return 0;
+
+ case e_APP_PIPELINE_NONE:
+ default:
+ app_main_loop_worker();
+ return 0;
+ }
+ }
+
+ if (lcore == app.core_tx) {
+ app_main_loop_tx();
+ return 0;
+ }
+
+ return 0;
+}
--- /dev/null
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MAIN_H_
+#define _MAIN_H_
+
+#ifndef APP_MBUF_ARRAY_SIZE
+#define APP_MBUF_ARRAY_SIZE 256
+#endif
+
+struct app_mbuf_array {
+ struct rte_mbuf *array[APP_MBUF_ARRAY_SIZE];
+ uint16_t n_mbufs;
+};
+
+#ifndef APP_MAX_PORTS
+#define APP_MAX_PORTS 4
+#endif
+
+struct app_params {
+ /* CPU cores */
+ uint32_t core_rx;
+ uint32_t core_worker;
+ uint32_t core_tx;
+
+	/* Ports */
+ uint32_t ports[APP_MAX_PORTS];
+ uint32_t n_ports;
+ uint32_t port_rx_ring_size;
+ uint32_t port_tx_ring_size;
+
+ /* Rings */
+ struct rte_ring *rings_rx[APP_MAX_PORTS];
+ struct rte_ring *rings_tx[APP_MAX_PORTS];
+ uint32_t ring_rx_size;
+ uint32_t ring_tx_size;
+
+ /* Internal buffers */
+ struct app_mbuf_array mbuf_rx;
+ struct app_mbuf_array mbuf_tx[APP_MAX_PORTS];
+
+ /* Buffer pool */
+ struct rte_mempool *pool;
+ uint32_t pool_buffer_size;
+ uint32_t pool_size;
+ uint32_t pool_cache_size;
+
+ /* Burst sizes */
+ uint32_t burst_size_rx_read;
+ uint32_t burst_size_rx_write;
+ uint32_t burst_size_worker_read;
+ uint32_t burst_size_worker_write;
+ uint32_t burst_size_tx_read;
+ uint32_t burst_size_tx_write;
+
+ /* App behavior */
+ uint32_t pipeline_type;
+} __rte_cache_aligned;
+
+extern struct app_params app;
+
+int app_parse_args(int argc, char **argv);
+void app_print_usage(void);
+void app_init(void);
+int app_lcore_main_loop(void *arg);
+
+/* Pipeline */
+enum {
+ e_APP_PIPELINE_NONE = 0,
+ e_APP_PIPELINE_STUB,
+
+ e_APP_PIPELINE_HASH_KEY8_EXT,
+ e_APP_PIPELINE_HASH_KEY8_LRU,
+ e_APP_PIPELINE_HASH_KEY16_EXT,
+ e_APP_PIPELINE_HASH_KEY16_LRU,
+ e_APP_PIPELINE_HASH_KEY32_EXT,
+ e_APP_PIPELINE_HASH_KEY32_LRU,
+
+ e_APP_PIPELINE_HASH_SPEC_KEY8_EXT,
+ e_APP_PIPELINE_HASH_SPEC_KEY8_LRU,
+ e_APP_PIPELINE_HASH_SPEC_KEY16_EXT,
+ e_APP_PIPELINE_HASH_SPEC_KEY16_LRU,
+ e_APP_PIPELINE_HASH_SPEC_KEY32_EXT,
+ e_APP_PIPELINE_HASH_SPEC_KEY32_LRU,
+
+ e_APP_PIPELINE_ACL,
+ e_APP_PIPELINE_LPM,
+ e_APP_PIPELINE_LPM_IPV6,
+ e_APP_PIPELINES
+};
+
+void app_main_loop_rx(void);
+void app_main_loop_rx_metadata(void);
+uint64_t test_hash(void *key, uint32_t key_size, uint64_t seed);
+
+void app_main_loop_worker(void);
+void app_main_loop_worker_pipeline_stub(void);
+void app_main_loop_worker_pipeline_hash(void);
+void app_main_loop_worker_pipeline_acl(void);
+void app_main_loop_worker_pipeline_lpm(void);
+void app_main_loop_worker_pipeline_lpm_ipv6(void);
+
+void app_main_loop_tx(void);
+
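+/*
+ * APP_FLUSH set to 0 disables the periodic pipeline flush in the worker
+ * run-time loops; remove the define below to fall back to flushing once
+ * every 0x3FF + 1 iterations.
+ */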
+#define APP_FLUSH 0
+#ifndef APP_FLUSH
+#define APP_FLUSH 0x3FF
+#endif
+
+#ifdef RTE_EXEC_ENV_BAREMETAL
+#define MAIN _main
+#else
+#define MAIN main
+#endif
+
+int MAIN(int argc, char **argv);
+
+#endif /* _MAIN_H_ */
--- /dev/null
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+
+#include <rte_log.h>
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_ip.h>
+#include <rte_byteorder.h>
+
+#include <rte_port_ring.h>
+#include <rte_table_acl.h>
+#include <rte_pipeline.h>
+
+#include "main.h"
+
+enum {
+ PROTO_FIELD_IPV4,
+ SRC_FIELD_IPV4,
+ DST_FIELD_IPV4,
+ SRCP_FIELD_IPV4,
+ DSTP_FIELD_IPV4,
+ NUM_FIELDS_IPV4
+};
+
+/*
+ * Define the layout of the fields that the ACL rules match on, i.e. the
+ * meta-data of the ACL rules. In this case it is the IPv4 5-tuple:
+ * protocol, source/destination addresses and source/destination ports.
+ */
+struct rte_acl_field_def ipv4_field_formats[NUM_FIELDS_IPV4] = {
+ {
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint8_t),
+ .field_index = PROTO_FIELD_IPV4,
+ .input_index = PROTO_FIELD_IPV4,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, next_proto_id),
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = SRC_FIELD_IPV4,
+ .input_index = SRC_FIELD_IPV4,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, src_addr),
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = DST_FIELD_IPV4,
+ .input_index = DST_FIELD_IPV4,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, dst_addr),
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = SRCP_FIELD_IPV4,
+ .input_index = SRCP_FIELD_IPV4,
+ .offset = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr),
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = DSTP_FIELD_IPV4,
+ .input_index = SRCP_FIELD_IPV4,
+ .offset = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) +
+ sizeof(uint16_t),
+ },
+};
+
+void
+app_main_loop_worker_pipeline_acl(void) {
+ struct rte_pipeline_params pipeline_params = {
+ .name = "pipeline",
+ .socket_id = rte_socket_id(),
+ };
+
+ struct rte_pipeline *p;
+ uint32_t port_in_id[APP_MAX_PORTS];
+ uint32_t port_out_id[APP_MAX_PORTS];
+ uint32_t table_id;
+ uint32_t i;
+
+ RTE_LOG(INFO, USER1,
+ "Core %u is doing work (pipeline with ACL table)\n",
+ rte_lcore_id());
+
+ /* Pipeline configuration */
+ p = rte_pipeline_create(&pipeline_params);
+ if (p == NULL)
+ rte_panic("Unable to configure the pipeline\n");
+
+ /* Input port configuration */
+ for (i = 0; i < app.n_ports; i++) {
+ struct rte_port_ring_reader_params port_ring_params = {
+ .ring = app.rings_rx[i],
+ };
+
+ struct rte_pipeline_port_in_params port_params = {
+ .ops = &rte_port_ring_reader_ops,
+ .arg_create = (void *) &port_ring_params,
+ .f_action = NULL,
+ .arg_ah = NULL,
+ .burst_size = app.burst_size_worker_read,
+ };
+
+ if (rte_pipeline_port_in_create(p, &port_params,
+ &port_in_id[i]))
+ rte_panic("Unable to configure input port for "
+ "ring %d\n", i);
+ }
+
+ /* Output port configuration */
+ for (i = 0; i < app.n_ports; i++) {
+ struct rte_port_ring_writer_params port_ring_params = {
+ .ring = app.rings_tx[i],
+ .tx_burst_sz = app.burst_size_worker_write,
+ };
+
+ struct rte_pipeline_port_out_params port_params = {
+ .ops = &rte_port_ring_writer_ops,
+ .arg_create = (void *) &port_ring_params,
+ .f_action = NULL,
+ .f_action_bulk = NULL,
+ .arg_ah = NULL,
+ };
+
+ if (rte_pipeline_port_out_create(p, &port_params,
+ &port_out_id[i]))
+ rte_panic("Unable to configure output port for "
+ "ring %d\n", i);
+ }
+
+ /* Table configuration */
+ {
+ struct rte_table_acl_params table_acl_params = {
+			.name = "test", /* unique identifier for ACL contexts */
+ .n_rules = 1 << 5,
+ .n_rule_fields = DIM(ipv4_field_formats),
+ };
+
+ /* Copy in the rule meta-data defined above into the params */
+ memcpy(table_acl_params.field_format, ipv4_field_formats,
+ sizeof(ipv4_field_formats));
+
+ struct rte_pipeline_table_params table_params = {
+ .ops = &rte_table_acl_ops,
+ .arg_create = &table_acl_params,
+ .f_action_hit = NULL,
+ .f_action_miss = NULL,
+ .arg_ah = NULL,
+ .action_data_size = 0,
+ };
+
+ if (rte_pipeline_table_create(p, &table_params, &table_id))
+ rte_panic("Unable to configure the ACL table\n");
+ }
+
+ /* Interconnecting ports and tables */
+ for (i = 0; i < app.n_ports; i++)
+ if (rte_pipeline_port_in_connect_to_table(p, port_in_id[i],
+ table_id))
+ rte_panic("Unable to connect input port %u to "
+ "table %u\n", port_in_id[i], table_id);
+
+ /* Add entries to tables */
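+	/*
+	 * One rule per output port: the IPv4 destination address space is
+	 * split into n_ports prefixes of depth 8 + log2(n_ports), and each
+	 * prefix is mapped to one output ring.
+	 */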
+ for (i = 0; i < app.n_ports; i++) {
+ struct rte_pipeline_table_entry table_entry = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ {.port_id = port_out_id[i & (app.n_ports - 1)]},
+ };
+ struct rte_table_acl_rule_add_params rule_params;
+ struct rte_pipeline_table_entry *entry_ptr;
+ int key_found, ret;
+
+ memset(&rule_params, 0, sizeof(rule_params));
+
+ /* Set the rule values */
+ rule_params.field_value[SRC_FIELD_IPV4].value.u32 = 0;
+ rule_params.field_value[SRC_FIELD_IPV4].mask_range.u32 = 0;
+ rule_params.field_value[DST_FIELD_IPV4].value.u32 =
+ i << (24 - __builtin_popcount(app.n_ports - 1));
+ rule_params.field_value[DST_FIELD_IPV4].mask_range.u32 =
+ 8 + __builtin_popcount(app.n_ports - 1);
+ rule_params.field_value[SRCP_FIELD_IPV4].value.u16 = 0;
+ rule_params.field_value[SRCP_FIELD_IPV4].mask_range.u16 =
+ UINT16_MAX;
+ rule_params.field_value[DSTP_FIELD_IPV4].value.u16 = 0;
+ rule_params.field_value[DSTP_FIELD_IPV4].mask_range.u16 =
+ UINT16_MAX;
+ rule_params.field_value[PROTO_FIELD_IPV4].value.u8 = 0;
+ rule_params.field_value[PROTO_FIELD_IPV4].mask_range.u8 = 0;
+
+ rule_params.priority = 0;
+
+ uint32_t dst_addr = rule_params.field_value[DST_FIELD_IPV4].
+ value.u32;
+ uint32_t dst_mask =
+ rule_params.field_value[DST_FIELD_IPV4].mask_range.u32;
+
+ printf("Adding rule to ACL table (IPv4 destination = "
+ "%u.%u.%u.%u/%u => port out = %u)\n",
+ (dst_addr & 0xFF000000) >> 24,
+ (dst_addr & 0x00FF0000) >> 16,
+ (dst_addr & 0x0000FF00) >> 8,
+ dst_addr & 0x000000FF,
+ dst_mask,
+ table_entry.port_id);
+
+ /* For ACL, add needs an rte_table_acl_rule_add_params struct */
+ ret = rte_pipeline_table_entry_add(p, table_id, &rule_params,
+ &table_entry, &key_found, &entry_ptr);
+ if (ret < 0)
+ rte_panic("Unable to add entry to table %u (%d)\n",
+ table_id, ret);
+ }
+
+ /* Enable input ports */
+ for (i = 0; i < app.n_ports; i++)
+ if (rte_pipeline_port_in_enable(p, port_in_id[i]))
+ rte_panic("Unable to enable input port %u\n",
+ port_in_id[i]);
+
+ /* Check pipeline consistency */
+ if (rte_pipeline_check(p) < 0)
+ rte_panic("Pipeline consistency check failed\n");
+
+ /* Run-time */
+#if APP_FLUSH == 0
+ for ( ; ; )
+ rte_pipeline_run(p);
+#else
+ for (i = 0; ; i++) {
+ rte_pipeline_run(p);
+
+ if ((i & APP_FLUSH) == 0)
+ rte_pipeline_flush(p);
+ }
+#endif
+}
--- /dev/null
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+
+#include <rte_log.h>
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_ip.h>
+#include <rte_byteorder.h>
+
+#include <rte_port_ring.h>
+#include <rte_table_hash.h>
+#include <rte_pipeline.h>
+
+#include "main.h"
+
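+/*
+ * Map the selected pipeline type to the hash table flavour: specialized
+ * vs. generic implementation, extendible-bucket vs. LRU, and key size.
+ */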
+static void
+translate_options(uint32_t *special, uint32_t *ext, uint32_t *key_size)
+{
+ switch (app.pipeline_type) {
+ case e_APP_PIPELINE_HASH_KEY8_EXT:
+ *special = 0; *ext = 1; *key_size = 8; return;
+ case e_APP_PIPELINE_HASH_KEY8_LRU:
+ *special = 0; *ext = 0; *key_size = 8; return;
+ case e_APP_PIPELINE_HASH_KEY16_EXT:
+ *special = 0; *ext = 1; *key_size = 16; return;
+ case e_APP_PIPELINE_HASH_KEY16_LRU:
+ *special = 0; *ext = 0; *key_size = 16; return;
+ case e_APP_PIPELINE_HASH_KEY32_EXT:
+ *special = 0; *ext = 1; *key_size = 32; return;
+ case e_APP_PIPELINE_HASH_KEY32_LRU:
+ *special = 0; *ext = 0; *key_size = 32; return;
+
+ case e_APP_PIPELINE_HASH_SPEC_KEY8_EXT:
+ *special = 1; *ext = 1; *key_size = 8; return;
+ case e_APP_PIPELINE_HASH_SPEC_KEY8_LRU:
+ *special = 1; *ext = 0; *key_size = 8; return;
+ case e_APP_PIPELINE_HASH_SPEC_KEY16_EXT:
+ *special = 1; *ext = 1; *key_size = 16; return;
+ case e_APP_PIPELINE_HASH_SPEC_KEY16_LRU:
+ *special = 1; *ext = 0; *key_size = 16; return;
+ case e_APP_PIPELINE_HASH_SPEC_KEY32_EXT:
+ *special = 1; *ext = 1; *key_size = 32; return;
+ case e_APP_PIPELINE_HASH_SPEC_KEY32_LRU:
+ *special = 1; *ext = 0; *key_size = 32; return;
+
+ default:
+ rte_panic("Invalid hash table type or key size\n");
+ }
+}
+
+void
+app_main_loop_worker_pipeline_hash(void) {
+ struct rte_pipeline_params pipeline_params = {
+ .name = "pipeline",
+ .socket_id = rte_socket_id(),
+ };
+
+ struct rte_pipeline *p;
+ uint32_t port_in_id[APP_MAX_PORTS];
+ uint32_t port_out_id[APP_MAX_PORTS];
+ uint32_t table_id;
+ uint32_t i;
+ uint32_t special, ext, key_size;
+
+ translate_options(&special, &ext, &key_size);
+
+ RTE_LOG(INFO, USER1, "Core %u is doing work "
+ "(pipeline with hash table, %s, %s, %d-byte key)\n",
+ rte_lcore_id(),
+ special ? "specialized" : "non-specialized",
+ ext ? "extendible bucket" : "LRU",
+ key_size);
+
+ /* Pipeline configuration */
+ p = rte_pipeline_create(&pipeline_params);
+ if (p == NULL)
+ rte_panic("Unable to configure the pipeline\n");
+
+ /* Input port configuration */
+ for (i = 0; i < app.n_ports; i++) {
+ struct rte_port_ring_reader_params port_ring_params = {
+ .ring = app.rings_rx[i],
+ };
+
+ struct rte_pipeline_port_in_params port_params = {
+ .ops = &rte_port_ring_reader_ops,
+ .arg_create = (void *) &port_ring_params,
+ .f_action = NULL,
+ .arg_ah = NULL,
+ .burst_size = app.burst_size_worker_read,
+ };
+
+ if (rte_pipeline_port_in_create(p, &port_params,
+ &port_in_id[i]))
+ rte_panic("Unable to configure input port for "
+ "ring %d\n", i);
+ }
+
+ /* Output port configuration */
+ for (i = 0; i < app.n_ports; i++) {
+ struct rte_port_ring_writer_params port_ring_params = {
+ .ring = app.rings_tx[i],
+ .tx_burst_sz = app.burst_size_worker_write,
+ };
+
+ struct rte_pipeline_port_out_params port_params = {
+ .ops = &rte_port_ring_writer_ops,
+ .arg_create = (void *) &port_ring_params,
+ .f_action = NULL,
+ .f_action_bulk = NULL,
+ .arg_ah = NULL,
+ };
+
+ if (rte_pipeline_port_out_create(p, &port_params,
+ &port_out_id[i]))
+ rte_panic("Unable to configure output port for "
+ "ring %d\n", i);
+ }
+
+ /* Table configuration */
+ switch (app.pipeline_type) {
+ case e_APP_PIPELINE_HASH_KEY8_EXT:
+ case e_APP_PIPELINE_HASH_KEY16_EXT:
+ case e_APP_PIPELINE_HASH_KEY32_EXT:
+ {
+ struct rte_table_hash_ext_params table_hash_params = {
+ .key_size = key_size,
+ .n_keys = 1 << 24,
+ .n_buckets = 1 << 22,
+ .n_buckets_ext = 1 << 21,
+ .f_hash = test_hash,
+ .seed = 0,
+ .signature_offset = 0,
+ .key_offset = 32,
+ };
+
+ struct rte_pipeline_table_params table_params = {
+ .ops = &rte_table_hash_ext_ops,
+ .arg_create = &table_hash_params,
+ .f_action_hit = NULL,
+ .f_action_miss = NULL,
+ .arg_ah = NULL,
+ .action_data_size = 0,
+ };
+
+ if (rte_pipeline_table_create(p, &table_params, &table_id))
+ rte_panic("Unable to configure the hash table\n");
+ }
+ break;
+
+ case e_APP_PIPELINE_HASH_KEY8_LRU:
+ case e_APP_PIPELINE_HASH_KEY16_LRU:
+ case e_APP_PIPELINE_HASH_KEY32_LRU:
+ {
+ struct rte_table_hash_lru_params table_hash_params = {
+ .key_size = key_size,
+ .n_keys = 1 << 24,
+ .n_buckets = 1 << 22,
+ .f_hash = test_hash,
+ .seed = 0,
+ .signature_offset = 0,
+ .key_offset = 32,
+ };
+
+ struct rte_pipeline_table_params table_params = {
+ .ops = &rte_table_hash_lru_ops,
+ .arg_create = &table_hash_params,
+ .f_action_hit = NULL,
+ .f_action_miss = NULL,
+ .arg_ah = NULL,
+ .action_data_size = 0,
+ };
+
+ if (rte_pipeline_table_create(p, &table_params, &table_id))
+ rte_panic("Unable to configure the hash table\n");
+ }
+ break;
+
+ case e_APP_PIPELINE_HASH_SPEC_KEY8_EXT:
+ {
+ struct rte_table_hash_key8_ext_params table_hash_params = {
+ .n_entries = 1 << 24,
+ .n_entries_ext = 1 << 23,
+ .signature_offset = 0,
+ .key_offset = 32,
+ .f_hash = test_hash,
+ .seed = 0,
+ };
+
+ struct rte_pipeline_table_params table_params = {
+ .ops = &rte_table_hash_key8_ext_ops,
+ .arg_create = &table_hash_params,
+ .f_action_hit = NULL,
+ .f_action_miss = NULL,
+ .arg_ah = NULL,
+ .action_data_size = 0,
+ };
+
+ if (rte_pipeline_table_create(p, &table_params, &table_id))
+ rte_panic("Unable to configure the hash table\n");
+ }
+ break;
+
+ case e_APP_PIPELINE_HASH_SPEC_KEY8_LRU:
+ {
+ struct rte_table_hash_key8_lru_params table_hash_params = {
+ .n_entries = 1 << 24,
+ .signature_offset = 0,
+ .key_offset = 32,
+ .f_hash = test_hash,
+ .seed = 0,
+ };
+
+ struct rte_pipeline_table_params table_params = {
+ .ops = &rte_table_hash_key8_lru_ops,
+ .arg_create = &table_hash_params,
+ .f_action_hit = NULL,
+ .f_action_miss = NULL,
+ .arg_ah = NULL,
+ .action_data_size = 0,
+ };
+
+ if (rte_pipeline_table_create(p, &table_params, &table_id))
+ rte_panic("Unable to configure the hash table\n");
+ }
+ break;
+
+ case e_APP_PIPELINE_HASH_SPEC_KEY16_EXT:
+ {
+ struct rte_table_hash_key16_ext_params table_hash_params = {
+ .n_entries = 1 << 24,
+ .n_entries_ext = 1 << 23,
+ .signature_offset = 0,
+ .key_offset = 32,
+ .f_hash = test_hash,
+ .seed = 0,
+ };
+
+ struct rte_pipeline_table_params table_params = {
+ .ops = &rte_table_hash_key16_ext_ops,
+ .arg_create = &table_hash_params,
+ .f_action_hit = NULL,
+ .f_action_miss = NULL,
+ .arg_ah = NULL,
+ .action_data_size = 0,
+ };
+
+ if (rte_pipeline_table_create(p, &table_params, &table_id))
+			rte_panic("Unable to configure the hash table\n");
+ }
+ break;
+
+ case e_APP_PIPELINE_HASH_SPEC_KEY16_LRU:
+ {
+ struct rte_table_hash_key16_lru_params table_hash_params = {
+ .n_entries = 1 << 24,
+ .signature_offset = 0,
+ .key_offset = 32,
+ .f_hash = test_hash,
+ .seed = 0,
+ };
+
+ struct rte_pipeline_table_params table_params = {
+ .ops = &rte_table_hash_key16_lru_ops,
+ .arg_create = &table_hash_params,
+ .f_action_hit = NULL,
+ .f_action_miss = NULL,
+ .arg_ah = NULL,
+ .action_data_size = 0,
+ };
+
+ if (rte_pipeline_table_create(p, &table_params, &table_id))
+ rte_panic("Unable to configure the hash table\n");
+ }
+ break;
+
+ case e_APP_PIPELINE_HASH_SPEC_KEY32_EXT:
+ {
+ struct rte_table_hash_key32_ext_params table_hash_params = {
+ .n_entries = 1 << 24,
+ .n_entries_ext = 1 << 23,
+ .signature_offset = 0,
+ .key_offset = 32,
+ .f_hash = test_hash,
+ .seed = 0,
+ };
+
+ struct rte_pipeline_table_params table_params = {
+ .ops = &rte_table_hash_key32_ext_ops,
+ .arg_create = &table_hash_params,
+ .f_action_hit = NULL,
+ .f_action_miss = NULL,
+ .arg_ah = NULL,
+ .action_data_size = 0,
+ };
+
+ if (rte_pipeline_table_create(p, &table_params, &table_id))
+ rte_panic("Unable to configure the hash table\n");
+ }
+ break;
+
+ case e_APP_PIPELINE_HASH_SPEC_KEY32_LRU:
+ {
+ struct rte_table_hash_key32_lru_params table_hash_params = {
+ .n_entries = 1 << 24,
+ .signature_offset = 0,
+ .key_offset = 32,
+ .f_hash = test_hash,
+ .seed = 0,
+ };
+
+ struct rte_pipeline_table_params table_params = {
+ .ops = &rte_table_hash_key32_lru_ops,
+ .arg_create = &table_hash_params,
+ .f_action_hit = NULL,
+ .f_action_miss = NULL,
+ .arg_ah = NULL,
+ .action_data_size = 0,
+ };
+
+ if (rte_pipeline_table_create(p, &table_params, &table_id))
+ rte_panic("Unable to configure the hash table\n");
+ }
+ break;
+
+ default:
+ rte_panic("Invalid hash table type or key size\n");
+ }
+
+ /* Interconnecting ports and tables */
+ for (i = 0; i < app.n_ports; i++)
+ if (rte_pipeline_port_in_connect_to_table(p, port_in_id[i],
+ table_id))
+ rte_panic("Unable to connect input port %u to "
+ "table %u\n", port_in_id[i], table_id);
+
+ /* Add entries to tables */
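+	/*
+	 * Populate the table with 1 << 24 keys derived from the loop counter,
+	 * assigning output ports in round-robin order.
+	 */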
+ for (i = 0; i < (1 << 24); i++) {
+ struct rte_pipeline_table_entry entry = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ {.port_id = port_out_id[i & (app.n_ports - 1)]},
+ };
+ struct rte_pipeline_table_entry *entry_ptr;
+ uint8_t key[32];
+ uint32_t *k32 = (uint32_t *) key;
+ int key_found, status;
+
+ memset(key, 0, sizeof(key));
+ k32[0] = rte_be_to_cpu_32(i);
+
+ status = rte_pipeline_table_entry_add(p, table_id, key, &entry,
+ &key_found, &entry_ptr);
+ if (status < 0)
+ rte_panic("Unable to add entry to table %u (%d)\n",
+ table_id, status);
+ }
+
+ /* Enable input ports */
+ for (i = 0; i < app.n_ports; i++)
+ if (rte_pipeline_port_in_enable(p, port_in_id[i]))
+ rte_panic("Unable to enable input port %u\n",
+ port_in_id[i]);
+
+ /* Check pipeline consistency */
+ if (rte_pipeline_check(p) < 0)
+ rte_panic("Pipeline consistency check failed\n");
+
+ /* Run-time */
+#if APP_FLUSH == 0
+ for ( ; ; )
+ rte_pipeline_run(p);
+#else
+ for (i = 0; ; i++) {
+ rte_pipeline_run(p);
+
+ if ((i & APP_FLUSH) == 0)
+ rte_pipeline_flush(p);
+ }
+#endif
+}
+
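+/*
+ * Trivial hash used both by the worker tables and by the RX core when
+ * pre-computing packet signatures: the IPv4 destination address rotated
+ * right by two bits.
+ */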
+uint64_t test_hash(
+ void *key,
+ __attribute__((unused)) uint32_t key_size,
+ __attribute__((unused)) uint64_t seed)
+{
+ uint32_t *k32 = (uint32_t *) key;
+ uint32_t ip_dst = rte_be_to_cpu_32(k32[0]);
+ uint64_t signature = (ip_dst >> 2) | ((ip_dst & 0x3) << 30);
+
+ return signature;
+}
+
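+/*
+ * RX loop with meta-data: for each received packet, write a 32-bit hash
+ * signature at meta-data offset 0 and the lookup key (IPv4 or IPv6
+ * destination address) at offset 32, matching the signature_offset and
+ * key_offset used by the worker pipeline tables.
+ */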
+void
+app_main_loop_rx_metadata(void) {
+ uint32_t i, j;
+ int ret;
+
+ RTE_LOG(INFO, USER1, "Core %u is doing RX (with meta-data)\n",
+ rte_lcore_id());
+
+ for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) {
+ uint16_t n_mbufs;
+
+ n_mbufs = rte_eth_rx_burst(
+ app.ports[i],
+ 0,
+ app.mbuf_rx.array,
+ app.burst_size_rx_read);
+
+ if (n_mbufs == 0)
+ continue;
+
+ for (j = 0; j < n_mbufs; j++) {
+ struct rte_mbuf *m;
+ uint8_t *m_data, *key;
+ struct ipv4_hdr *ip_hdr;
+ struct ipv6_hdr *ipv6_hdr;
+ uint32_t ip_dst;
+ uint8_t *ipv6_dst;
+ uint32_t *signature, *k32;
+
+ m = app.mbuf_rx.array[j];
+ m_data = rte_pktmbuf_mtod(m, uint8_t *);
+ signature = RTE_MBUF_METADATA_UINT32_PTR(m, 0);
+ key = RTE_MBUF_METADATA_UINT8_PTR(m, 32);
+
+ if (m->ol_flags & PKT_RX_IPV4_HDR) {
+ ip_hdr = (struct ipv4_hdr *)
+ &m_data[sizeof(struct ether_hdr)];
+ ip_dst = ip_hdr->dst_addr;
+
+ k32 = (uint32_t *) key;
+ k32[0] = ip_dst & 0xFFFFFF00;
+ } else {
+ ipv6_hdr = (struct ipv6_hdr *)
+ &m_data[sizeof(struct ether_hdr)];
+ ipv6_dst = ipv6_hdr->dst_addr;
+
+ memcpy(key, ipv6_dst, 16);
+ }
+
+ *signature = test_hash(key, 0, 0);
+ }
+
+ do {
+ ret = rte_ring_sp_enqueue_bulk(
+ app.rings_rx[i],
+ (void **) app.mbuf_rx.array,
+ n_mbufs);
+ } while (ret < 0);
+ }
+}
--- /dev/null
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+
+#include <rte_log.h>
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_ip.h>
+#include <rte_byteorder.h>
+
+#include <rte_port_ring.h>
+#include <rte_table_lpm.h>
+#include <rte_pipeline.h>
+
+#include "main.h"
+
+void
+app_main_loop_worker_pipeline_lpm(void) {
+ struct rte_pipeline_params pipeline_params = {
+ .name = "pipeline",
+ .socket_id = rte_socket_id(),
+ };
+
+ struct rte_pipeline *p;
+ uint32_t port_in_id[APP_MAX_PORTS];
+ uint32_t port_out_id[APP_MAX_PORTS];
+ uint32_t table_id;
+ uint32_t i;
+
+ RTE_LOG(INFO, USER1, "Core %u is doing work (pipeline with "
+ "LPM table)\n", rte_lcore_id());
+
+ /* Pipeline configuration */
+ p = rte_pipeline_create(&pipeline_params);
+ if (p == NULL)
+ rte_panic("Unable to configure the pipeline\n");
+
+ /* Input port configuration */
+ for (i = 0; i < app.n_ports; i++) {
+ struct rte_port_ring_reader_params port_ring_params = {
+ .ring = app.rings_rx[i],
+ };
+
+ struct rte_pipeline_port_in_params port_params = {
+ .ops = &rte_port_ring_reader_ops,
+ .arg_create = (void *) &port_ring_params,
+ .f_action = NULL,
+ .arg_ah = NULL,
+ .burst_size = app.burst_size_worker_read,
+ };
+
+ if (rte_pipeline_port_in_create(p, &port_params,
+ &port_in_id[i]))
+ rte_panic("Unable to configure input port for "
+ "ring %d\n", i);
+ }
+
+ /* Output port configuration */
+ for (i = 0; i < app.n_ports; i++) {
+ struct rte_port_ring_writer_params port_ring_params = {
+ .ring = app.rings_tx[i],
+ .tx_burst_sz = app.burst_size_worker_write,
+ };
+
+ struct rte_pipeline_port_out_params port_params = {
+ .ops = &rte_port_ring_writer_ops,
+ .arg_create = (void *) &port_ring_params,
+ .f_action = NULL,
+ .f_action_bulk = NULL,
+ .arg_ah = NULL,
+ };
+
+ if (rte_pipeline_port_out_create(p, &port_params,
+ &port_out_id[i]))
+ rte_panic("Unable to configure output port for "
+ "ring %d\n", i);
+ }
+
+ /* Table configuration */
+ {
+ struct rte_table_lpm_params table_lpm_params = {
+ .n_rules = 1 << 24,
+ .entry_unique_size =
+ sizeof(struct rte_pipeline_table_entry),
+ .offset = 32,
+ };
+
+ struct rte_pipeline_table_params table_params = {
+ .ops = &rte_table_lpm_ops,
+ .arg_create = &table_lpm_params,
+ .f_action_hit = NULL,
+ .f_action_miss = NULL,
+ .arg_ah = NULL,
+ .action_data_size = 0,
+ };
+
+ if (rte_pipeline_table_create(p, &table_params, &table_id))
+ rte_panic("Unable to configure the LPM table\n");
+ }
+
+ /* Interconnecting ports and tables */
+ for (i = 0; i < app.n_ports; i++)
+ if (rte_pipeline_port_in_connect_to_table(p, port_in_id[i],
+ table_id))
+ rte_panic("Unable to connect input port %u to "
+ "table %u\n", port_in_id[i], table_id);
+
+ /* Add entries to tables */
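+	/*
+	 * One route per output port: the IPv4 destination address space is
+	 * split into n_ports prefixes of depth 8 + log2(n_ports).
+	 */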
+ for (i = 0; i < app.n_ports; i++) {
+ struct rte_pipeline_table_entry entry = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ {.port_id = port_out_id[i & (app.n_ports - 1)]},
+ };
+
+ struct rte_table_lpm_key key = {
+ .ip = i << (24 - __builtin_popcount(app.n_ports - 1)),
+ .depth = 8 + __builtin_popcount(app.n_ports - 1),
+ };
+
+ struct rte_pipeline_table_entry *entry_ptr;
+
+ int key_found, status;
+
+ printf("Adding rule to LPM table (IPv4 destination = %"
+ PRIu32 ".%" PRIu32 ".%" PRIu32 ".%" PRIu32 "/%" PRIu8
+ " => port out = %" PRIu32 ")\n",
+ (key.ip & 0xFF000000) >> 24,
+ (key.ip & 0x00FF0000) >> 16,
+ (key.ip & 0x0000FF00) >> 8,
+ key.ip & 0x000000FF,
+ key.depth,
+ i);
+
+ status = rte_pipeline_table_entry_add(p, table_id, &key, &entry,
+ &key_found, &entry_ptr);
+ if (status < 0)
+ rte_panic("Unable to add entry to table %u (%d)\n",
+ table_id, status);
+ }
+
+ /* Enable input ports */
+ for (i = 0; i < app.n_ports; i++)
+ if (rte_pipeline_port_in_enable(p, port_in_id[i]))
+ rte_panic("Unable to enable input port %u\n",
+ port_in_id[i]);
+
+ /* Check pipeline consistency */
+ if (rte_pipeline_check(p) < 0)
+ rte_panic("Pipeline consistency check failed\n");
+
+ /* Run-time */
+#if APP_FLUSH == 0
+ for ( ; ; )
+ rte_pipeline_run(p);
+#else
+ for (i = 0; ; i++) {
+ rte_pipeline_run(p);
+
+ if ((i & APP_FLUSH) == 0)
+ rte_pipeline_flush(p);
+ }
+#endif
+}
--- /dev/null
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+
+#include <rte_log.h>
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_ip.h>
+#include <rte_byteorder.h>
+
+#include <rte_port_ring.h>
+#include <rte_table_lpm_ipv6.h>
+#include <rte_pipeline.h>
+
+#include "main.h"
+
+void
+app_main_loop_worker_pipeline_lpm_ipv6(void) {
+ struct rte_pipeline_params pipeline_params = {
+ .name = "pipeline",
+ .socket_id = rte_socket_id(),
+ };
+
+ struct rte_pipeline *p;
+ uint32_t port_in_id[APP_MAX_PORTS];
+ uint32_t port_out_id[APP_MAX_PORTS];
+ uint32_t table_id;
+ uint32_t i;
+
+ RTE_LOG(INFO, USER1,
+ "Core %u is doing work (pipeline with IPv6 LPM table)\n",
+ rte_lcore_id());
+
+ /* Pipeline configuration */
+ p = rte_pipeline_create(&pipeline_params);
+ if (p == NULL)
+ rte_panic("Unable to configure the pipeline\n");
+
+ /* Input port configuration */
+ for (i = 0; i < app.n_ports; i++) {
+ struct rte_port_ring_reader_params port_ring_params = {
+ .ring = app.rings_rx[i],
+ };
+
+ struct rte_pipeline_port_in_params port_params = {
+ .ops = &rte_port_ring_reader_ops,
+ .arg_create = (void *) &port_ring_params,
+ .f_action = NULL,
+ .arg_ah = NULL,
+ .burst_size = app.burst_size_worker_read,
+ };
+
+ if (rte_pipeline_port_in_create(p, &port_params,
+ &port_in_id[i]))
+ rte_panic("Unable to configure input port for "
+ "ring %d\n", i);
+ }
+
+ /* Output port configuration */
+ for (i = 0; i < app.n_ports; i++) {
+ struct rte_port_ring_writer_params port_ring_params = {
+ .ring = app.rings_tx[i],
+ .tx_burst_sz = app.burst_size_worker_write,
+ };
+
+ struct rte_pipeline_port_out_params port_params = {
+ .ops = &rte_port_ring_writer_ops,
+ .arg_create = (void *) &port_ring_params,
+ .f_action = NULL,
+ .f_action_bulk = NULL,
+ .arg_ah = NULL,
+ };
+
+ if (rte_pipeline_port_out_create(p, &port_params,
+ &port_out_id[i]))
+ rte_panic("Unable to configure output port for "
+ "ring %d\n", i);
+ }
+
+ /* Table configuration */
+ {
+ struct rte_table_lpm_ipv6_params table_lpm_ipv6_params = {
+ .n_rules = 1 << 24,
+ .number_tbl8s = 1 << 21,
+ .entry_unique_size =
+ sizeof(struct rte_pipeline_table_entry),
+ .offset = 32,
+ };
+
+ struct rte_pipeline_table_params table_params = {
+ .ops = &rte_table_lpm_ipv6_ops,
+ .arg_create = &table_lpm_ipv6_params,
+ .f_action_hit = NULL,
+ .f_action_miss = NULL,
+ .arg_ah = NULL,
+ .action_data_size = 0,
+ };
+
+ if (rte_pipeline_table_create(p, &table_params, &table_id))
+ rte_panic("Unable to configure the IPv6 LPM table\n");
+ }
+
+ /* Interconnecting ports and tables */
+ for (i = 0; i < app.n_ports; i++)
+ if (rte_pipeline_port_in_connect_to_table(p, port_in_id[i],
+ table_id))
+ rte_panic("Unable to connect input port %u to "
+ "table %u\n", port_in_id[i], table_id);
+
+ /* Add entries to tables */
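+	/*
+	 * One route per output port: the same IPv4-style prefix split is
+	 * written into the first four bytes of the IPv6 key.
+	 */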
+ for (i = 0; i < app.n_ports; i++) {
+ struct rte_pipeline_table_entry entry = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ {.port_id = port_out_id[i & (app.n_ports - 1)]},
+ };
+
+ struct rte_table_lpm_ipv6_key key;
+ struct rte_pipeline_table_entry *entry_ptr;
+ uint32_t ip;
+ int key_found, status;
+
+ /* Zero the key so the unused bytes of the IPv6 address are not left uninitialized */
+ memset(&key, 0, sizeof(key));
+
+ key.depth = 8 + __builtin_popcount(app.n_ports - 1);
+
+ ip = rte_bswap32(i << (24 -
+ __builtin_popcount(app.n_ports - 1)));
+ memcpy(key.ip, &ip, sizeof(uint32_t));
+
+ printf("Adding rule to IPv6 LPM table (IPv6 destination = "
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:"
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x/%u => "
+ "port out = %u)\n",
+ key.ip[0], key.ip[1], key.ip[2], key.ip[3],
+ key.ip[4], key.ip[5], key.ip[6], key.ip[7],
+ key.ip[8], key.ip[9], key.ip[10], key.ip[11],
+ key.ip[12], key.ip[13], key.ip[14], key.ip[15],
+ key.depth, i);
+
+ status = rte_pipeline_table_entry_add(p, table_id, &key, &entry,
+ &key_found, &entry_ptr);
+ if (status < 0)
+ rte_panic("Unable to add entry to table %u (%d)\n",
+ table_id, status);
+ }
+
+ /* Enable input ports */
+ for (i = 0; i < app.n_ports; i++)
+ if (rte_pipeline_port_in_enable(p, port_in_id[i]))
+ rte_panic("Unable to enable input port %u\n",
+ port_in_id[i]);
+
+ /* Check pipeline consistency */
+ if (rte_pipeline_check(p) < 0)
+ rte_panic("Pipeline consistency check failed\n");
+
+ /* Run-time */
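+ /*
+ * With APP_FLUSH non-zero (used as a mask here), the output ports are
+ * flushed whenever the low bits of the iteration counter are all zero,
+ * so buffered packets are not held back indefinitely when traffic is
+ * light.
+ */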
+#if APP_FLUSH == 0
+ for ( ; ; )
+ rte_pipeline_run(p);
+#else
+ for (i = 0; ; i++) {
+ rte_pipeline_run(p);
+
+ if ((i & APP_FLUSH) == 0)
+ rte_pipeline_flush(p);
+ }
+#endif
+}
--- /dev/null
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+
+#include <rte_log.h>
+#include <rte_port_ring.h>
+#include <rte_table_stub.h>
+#include <rte_pipeline.h>
+
+#include "main.h"
+
+void
+app_main_loop_worker_pipeline_stub(void) {
+ struct rte_pipeline_params pipeline_params = {
+ .name = "pipeline",
+ .socket_id = rte_socket_id(),
+ };
+
+ struct rte_pipeline *p;
+ uint32_t port_in_id[APP_MAX_PORTS];
+ uint32_t port_out_id[APP_MAX_PORTS];
+ uint32_t table_id[APP_MAX_PORTS];
+ uint32_t i;
+
+ RTE_LOG(INFO, USER1, "Core %u is doing work (pipeline with stub "
+ "tables)\n", rte_lcore_id());
+
+ /* Pipeline configuration */
+ p = rte_pipeline_create(&pipeline_params);
+ if (p == NULL)
+ rte_panic("Unable to configure the pipeline\n");
+
+ /* Input port configuration */
+ for (i = 0; i < app.n_ports; i++) {
+ struct rte_port_ring_reader_params port_ring_params = {
+ .ring = app.rings_rx[i],
+ };
+
+ struct rte_pipeline_port_in_params port_params = {
+ .ops = &rte_port_ring_reader_ops,
+ .arg_create = (void *) &port_ring_params,
+ .f_action = NULL,
+ .arg_ah = NULL,
+ .burst_size = app.burst_size_worker_read,
+ };
+
+ if (rte_pipeline_port_in_create(p, &port_params,
+ &port_in_id[i]))
+ rte_panic("Unable to configure input port for "
+ "ring %d\n", i);
+ }
+
+ /* Output port configuration */
+ for (i = 0; i < app.n_ports; i++) {
+ struct rte_port_ring_writer_params port_ring_params = {
+ .ring = app.rings_tx[i],
+ .tx_burst_sz = app.burst_size_worker_write,
+ };
+
+ struct rte_pipeline_port_out_params port_params = {
+ .ops = &rte_port_ring_writer_ops,
+ .arg_create = (void *) &port_ring_params,
+ .f_action = NULL,
+ .f_action_bulk = NULL,
+ .arg_ah = NULL,
+ };
+
+ if (rte_pipeline_port_out_create(p, &port_params,
+ &port_out_id[i]))
+ rte_panic("Unable to configure output port for "
+ "ring %d\n", i);
+ }
+
+ /* Table configuration */
+ for (i = 0; i < app.n_ports; i++) {
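+ /*
+ * A stub table performs no lookup, so every packet takes the miss
+ * path; the default entry added below decides the output port.
+ */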
+ struct rte_pipeline_table_params table_params = {
+ .ops = &rte_table_stub_ops,
+ .arg_create = NULL,
+ .f_action_hit = NULL,
+ .f_action_miss = NULL,
+ .arg_ah = NULL,
+ .action_data_size = 0,
+ };
+
+ if (rte_pipeline_table_create(p, &table_params, &table_id[i]))
+ rte_panic("Unable to configure table %u\n", i);
+ }
+
+ /* Interconnecting ports and tables */
+ for (i = 0; i < app.n_ports; i++)
+ if (rte_pipeline_port_in_connect_to_table(p, port_in_id[i],
+ table_id[i]))
+ rte_panic("Unable to connect input port %u to "
+ "table %u\n", port_in_id[i], table_id[i]);
+
+ /* Add entries to tables */
+ for (i = 0; i < app.n_ports; i++) {
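+ /*
+ * Ports are handled in pairs: the default entry of table i forwards
+ * everything to output port i ^ 1, so traffic received on port 0
+ * leaves on port 1 and vice versa.
+ */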
+ struct rte_pipeline_table_entry entry = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ {.port_id = port_out_id[i ^ 1]},
+ };
+ struct rte_pipeline_table_entry *default_entry_ptr;
+
+ if (rte_pipeline_table_default_entry_add(p, table_id[i], &entry,
+ &default_entry_ptr))
+ rte_panic("Unable to add default entry to table %u\n",
+ table_id[i]);
+ }
+
+ /* Enable input ports */
+ for (i = 0; i < app.n_ports; i++)
+ if (rte_pipeline_port_in_enable(p, port_in_id[i]))
+ rte_panic("Unable to enable input port %u\n",
+ port_in_id[i]);
+
+ /* Check pipeline consistency */
+ if (rte_pipeline_check(p) < 0)
+ rte_panic("Pipeline consistency check failed\n");
+
+ /* Run-time */
+#if APP_FLUSH == 0
+ for ( ; ; )
+ rte_pipeline_run(p);
+#else
+ for (i = 0; ; i++) {
+ rte_pipeline_run(p);
+
+ if ((i & APP_FLUSH) == 0)
+ rte_pipeline_flush(p);
+ }
+#endif
+}
--- /dev/null
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <getopt.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_lpm.h>
+#include <rte_lpm6.h>
+#include <rte_malloc.h>
+
+#include "main.h"
+
+void
+app_main_loop_rx(void) {
+ uint32_t i;
+ int ret;
+
+ RTE_LOG(INFO, USER1, "Core %u is doing RX\n", rte_lcore_id());
+
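+ /*
+ * Poll the NIC ports in round-robin (n_ports is assumed to be a power
+ * of two), read up to burst_size_rx_read packets from queue 0 and
+ * retry the bulk enqueue into the per-port RX ring until it succeeds.
+ */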
+ for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) {
+ uint16_t n_mbufs;
+
+ n_mbufs = rte_eth_rx_burst(
+ app.ports[i],
+ 0,
+ app.mbuf_rx.array,
+ app.burst_size_rx_read);
+
+ if (n_mbufs == 0)
+ continue;
+
+ do {
+ ret = rte_ring_sp_enqueue_bulk(
+ app.rings_rx[i],
+ (void **) app.mbuf_rx.array,
+ n_mbufs);
+ } while (ret < 0);
+ }
+}
+
+void
+app_main_loop_worker(void) {
+ struct app_mbuf_array *worker_mbuf;
+ uint32_t i;
+
+ RTE_LOG(INFO, USER1, "Core %u is doing work (no pipeline)\n",
+ rte_lcore_id());
+
+ worker_mbuf = rte_malloc_socket(NULL, sizeof(struct app_mbuf_array),
+ CACHE_LINE_SIZE, rte_socket_id());
+ if (worker_mbuf == NULL)
+ rte_panic("Worker thread: cannot allocate buffer space\n");
+
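+ /*
+ * Dequeue a burst from the RX ring of port i and pass it straight to
+ * the TX ring of the paired port (i ^ 1); the bulk ring calls either
+ * move the whole burst or nothing, hence the retry loop.
+ */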
+ for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) {
+ int ret;
+
+ ret = rte_ring_sc_dequeue_bulk(
+ app.rings_rx[i],
+ (void **) worker_mbuf->array,
+ app.burst_size_worker_read);
+
+ if (ret == -ENOENT)
+ continue;
+
+ do {
+ ret = rte_ring_sp_enqueue_bulk(
+ app.rings_tx[i ^ 1],
+ (void **) worker_mbuf->array,
+ app.burst_size_worker_write);
+ } while (ret < 0);
+ }
+}
+
+void
+app_main_loop_tx(void) {
+ uint32_t i;
+
+ RTE_LOG(INFO, USER1, "Core %u is doing TX\n", rte_lcore_id());
+
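+ /*
+ * Accumulate packets per port until at least burst_size_tx_write are
+ * buffered, then transmit them on queue 0 of that port.
+ */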
+ for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) {
+ uint16_t n_mbufs, n_pkts;
+ int ret;
+
+ n_mbufs = app.mbuf_tx[i].n_mbufs;
+
+ ret = rte_ring_sc_dequeue_bulk(
+ app.rings_tx[i],
+ (void **) &app.mbuf_tx[i].array[n_mbufs],
+ app.burst_size_tx_read);
+
+ if (ret == -ENOENT)
+ continue;
+
+ n_mbufs += app.burst_size_tx_read;
+
+ if (n_mbufs < app.burst_size_tx_write) {
+ app.mbuf_tx[i].n_mbufs = n_mbufs;
+ continue;
+ }
+
+ n_pkts = rte_eth_tx_burst(
+ app.ports[i],
+ 0,
+ app.mbuf_tx[i].array,
+ n_mbufs);
+
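+ /* Free the packets that the NIC did not accept for transmission */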
+ if (n_pkts < n_mbufs) {
+ uint16_t k;
+
+ for (k = n_pkts; k < n_mbufs; k++) {
+ struct rte_mbuf *pkt_to_free;
+
+ pkt_to_free = app.mbuf_tx[i].array[k];
+ rte_pktmbuf_free(pkt_to_free);
+ }
+ }
+
+ app.mbuf_tx[i].n_mbufs = 0;
+ }
+}