-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
*/
#include <stdio.h>
#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
+#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
-#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
#include <rte_string_fns.h>
#include <rte_timer.h>
#include <rte_power.h>
+#include <rte_spinlock.h>
#define RTE_LOGTYPE_L3FWD_POWER RTE_LOGTYPE_USER1
#define MAX_PKT_BURST 32
-#define MIN_ZERO_POLL_COUNT 5
+#define MIN_ZERO_POLL_COUNT 10
-/* around 100ms at 2 Ghz */
-#define TIMER_RESOLUTION_CYCLES 200000000ULL
/* 100 ms interval */
#define TIMER_NUMBER_PER_SECOND 10
/* 100000 us */
#define SCALING_PERIOD (1000000/TIMER_NUMBER_PER_SECOND)
#define MEMPOOL_CACHE_SIZE 256
-#define MBUF_DATA_SIZE (2048 + RTE_PKTMBUF_HEADROOM)
-
/*
 * This expression is used to calculate the number of mbufs needed depending on
 * user input, taking into account memory for rx and tx hardware rings and the
 * per-lcore mempool caches. RTE_MAX ensures NB_MBUF never drops below 8192.
 */
#define NB_MBUF RTE_MAX ( \
- (nb_ports*nb_rx_queue*RTE_TEST_RX_DESC_DEFAULT + \
+ (nb_ports*nb_rx_queue*nb_rxd + \
nb_ports*nb_lcores*MAX_PKT_BURST + \
- nb_ports*n_tx_queue*RTE_TEST_TX_DESC_DEFAULT + \
+ nb_ports*n_tx_queue*nb_txd + \
nb_lcores*MEMPOOL_CACHE_SIZE), \
(unsigned)8192)
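+/*
+ * A worked example of the sizing above (illustrative numbers only): with
+ * 2 ports, 2 rx queues per port and 4 lcores/tx queues at the 512-descriptor
+ * defaults, the sum is 2*2*512 + 2*4*32 + 2*4*512 + 4*256 = 7424, so
+ * NB_MBUF falls back to the 8192 floor.
+ */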
/*
* Configurable number of RX/TX ring descriptors
*/
-#define RTE_TEST_RX_DESC_DEFAULT 128
+#define RTE_TEST_RX_DESC_DEFAULT 512
#define RTE_TEST_TX_DESC_DEFAULT 512
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
/* ethernet addresses of ports */
static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
+/* per-port spinlocks serializing rx interrupt enable/disable */
+static rte_spinlock_t locks[RTE_MAX_ETHPORTS];
+
/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;
/* Ports set in promiscuous mode off by default. */
static int promiscuous_on = 0;
/* NUMA is enabled by default. */
static int numa_on = 1;
+static int parse_ptype; /**< Parse packet type via rx callback; off by default */
enum freq_scale_hint_t
{
FREQ_HIGHEST = 2
};
-struct mbuf_table {
- uint16_t len;
- struct rte_mbuf *m_table[MAX_PKT_BURST];
-};
-
struct lcore_rx_queue {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
enum freq_scale_hint_t freq_up_hint;
uint32_t zero_rx_packet_count;
#define MAX_TX_QUEUE_PER_PORT RTE_MAX_ETHPORTS
#define MAX_RX_QUEUE_PER_PORT 128
+#define MAX_RX_QUEUE_INTERRUPT_PER_PORT 16
+
#define MAX_LCORE_PARAMS 1024
struct lcore_params {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
uint8_t lcore_id;
} __rte_cache_aligned;
static struct rte_eth_conf port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
+ .mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
.header_split = 0, /**< Header Split disabled */
.hw_ip_checksum = 1, /**< IP checksum offload enabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
- .rss_hf = ETH_RSS_IP,
+ .rss_hf = ETH_RSS_UDP,
},
},
.txmode = {
- .mq_mode = ETH_DCB_NONE,
+ .mq_mode = ETH_MQ_TX_NONE,
+ },
+ .intr_conf = {
+ .rxq = 1,
},
};
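+/*
+ * Note: .intr_conf.rxq = 1 requests per-queue Rx interrupt support at
+ * configure time, so idle lcores can arm interrupts and block in
+ * rte_epoll_wait() instead of busy polling. PMDs lacking the capability
+ * may reject this configuration, which is why main() clears the flag for
+ * ports that have no rx queues.
+ */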
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
-#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
+#ifdef RTE_ARCH_X86
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC rte_hash_crc
#else
#define IPV6_L3FWD_NUM_ROUTES \
(sizeof(ipv6_l3fwd_route_array) / sizeof(ipv6_l3fwd_route_array[0]))
-static uint8_t ipv4_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
-static uint8_t ipv6_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
+static uint16_t ipv4_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
+static uint16_t ipv6_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
struct lcore_conf {
uint16_t n_rx_queue;
struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
+ uint16_t n_tx_port;
+ uint16_t tx_port_id[RTE_MAX_ETHPORTS];
uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
- struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
+ struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
lookup_struct_t * ipv4_lookup_struct;
lookup_struct_t * ipv6_lookup_struct;
} __rte_cache_aligned;
static inline uint32_t power_idle_heuristic(uint32_t zero_rx_packet_count);
static inline enum freq_scale_hint_t power_freq_scaleup_heuristic( \
- unsigned lcore_id, uint8_t port_id, uint16_t queue_id);
+ unsigned int lcore_id, uint16_t port_id, uint16_t queue_id);
/* exit signal handler */
static void
signal_exit_now(int sigtype)
{
unsigned lcore_id;
+ unsigned int portid, nb_ports;
int ret;
if (sigtype == SIGINT) {
"library de-initialization failed on "
"core%u\n", lcore_id);
}
+
+ nb_ports = rte_eth_dev_count();
+ for (portid = 0; portid < nb_ports; portid++) {
+ if ((enabled_port_mask & (1 << portid)) == 0)
+ continue;
+
+ rte_eth_dev_stop(portid);
+ rte_eth_dev_close(portid);
+ }
}
rte_exit(EXIT_SUCCESS, "User forced exit\n");
/* accumulate total execution time in us when callback is invoked */
sleep_time_ratio = (float)(stats[lcore_id].sleep_time) /
(float)SCALING_PERIOD;
-
/**
 * check whether we need to scale down frequency a step if it sleeps a lot.
*/
- if (sleep_time_ratio >= SCALING_DOWN_TIME_RATIO_THRESHOLD)
- rte_power_freq_down(lcore_id);
+ if (sleep_time_ratio >= SCALING_DOWN_TIME_RATIO_THRESHOLD) {
+ if (rte_power_freq_down)
+ rte_power_freq_down(lcore_id);
+ }
else if ( (unsigned)(stats[lcore_id].nb_rx_processed /
- stats[lcore_id].nb_iteration_looped) < MAX_PKT_BURST)
+ stats[lcore_id].nb_iteration_looped) < MAX_PKT_BURST) {
/**
	 * scale down a step if the average number of packets per
	 * iteration is less than expected.
*/
- rte_power_freq_down(lcore_id);
+ if (rte_power_freq_down)
+ rte_power_freq_down(lcore_id);
+ }
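+	/*
+	 * The NULL checks above guard the rte_power_freq_* hooks: they are
+	 * function pointers filled in by rte_power_init() and stay unset on
+	 * lcores where the power library failed to initialize (init failure
+	 * is now logged rather than fatal).
+	 */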
/**
* initialize another timer according to current frequency to ensure
stats[lcore_id].sleep_time = 0;
}
-/* Send burst of packets on an output interface */
-static inline int
-send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
-{
- struct rte_mbuf **m_table;
- int ret;
- uint16_t queueid;
-
- queueid = qconf->tx_queue_id[port];
- m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
-
- ret = rte_eth_tx_burst(port, queueid, m_table, n);
- if (unlikely(ret < n)) {
- do {
- rte_pktmbuf_free(m_table[ret]);
- } while (++ret < n);
- }
-
- return 0;
-}
-
/* Enqueue a single packet, and send burst if queue is filled */
static inline int
-send_single_packet(struct rte_mbuf *m, uint8_t port)
+send_single_packet(struct rte_mbuf *m, uint16_t port)
{
uint32_t lcore_id;
- uint16_t len;
struct lcore_conf *qconf;
lcore_id = rte_lcore_id();
-
qconf = &lcore_conf[lcore_id];
- len = qconf->tx_mbufs[port].len;
- qconf->tx_mbufs[port].m_table[len] = m;
- len++;
-
- /* enough pkts to be sent */
- if (unlikely(len == MAX_PKT_BURST)) {
- send_burst(qconf, MAX_PKT_BURST, port);
- len = 0;
- }
- qconf->tx_mbufs[port].len = len;
+ rte_eth_tx_buffer(port, qconf->tx_queue_id[port],
+ qconf->tx_buffer[port], m);
+
return 0;
}
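+/*
+ * rte_eth_tx_buffer() only queues the mbuf: transmission happens
+ * automatically once the buffer holds MAX_PKT_BURST packets (the size given
+ * to rte_eth_tx_buffer_init() in main()), and the drain timer in main_loop()
+ * flushes any stragglers.
+ */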
key.port_dst, key.port_src, key.proto);
}
-static inline uint8_t
-get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid,
+static inline uint16_t
+get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint16_t portid,
lookup_struct_t * ipv4_l3fwd_lookup_struct)
{
struct ipv4_5tuple key;
/* Find destination port */
ret = rte_hash_lookup(ipv4_l3fwd_lookup_struct, (const void *)&key);
- return (uint8_t)((ret < 0)? portid : ipv4_l3fwd_out_if[ret]);
+ return ((ret < 0) ? portid : ipv4_l3fwd_out_if[ret]);
}
-static inline uint8_t
-get_ipv6_dst_port(struct ipv6_hdr *ipv6_hdr, uint8_t portid,
+static inline uint16_t
+get_ipv6_dst_port(struct ipv6_hdr *ipv6_hdr, uint16_t portid,
lookup_struct_t *ipv6_l3fwd_lookup_struct)
{
struct ipv6_5tuple key;
/* Find destination port */
ret = rte_hash_lookup(ipv6_l3fwd_lookup_struct, (const void *)&key);
- return (uint8_t)((ret < 0)? portid : ipv6_l3fwd_out_if[ret]);
+ return ((ret < 0) ? portid : ipv6_l3fwd_out_if[ret]);
}
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
-static inline uint8_t
-get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid,
+static inline uint16_t
+get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint16_t portid,
lookup_struct_t *ipv4_l3fwd_lookup_struct)
{
- uint8_t next_hop;
+ uint32_t next_hop;
- return (uint8_t) ((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
+ return ((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
rte_be_to_cpu_32(ipv4_hdr->dst_addr), &next_hop) == 0)?
next_hop : portid);
}
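+/*
+ * next_hop is now uint32_t because rte_lpm_lookup(), since the DPDK 16.04
+ * LPM rework, returns the next hop through a uint32_t pointer rather than
+ * the old uint8_t one.
+ */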
#endif
static inline void
-l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid,
+parse_ptype_one(struct rte_mbuf *m)
+{
+ struct ether_hdr *eth_hdr;
+ uint32_t packet_type = RTE_PTYPE_UNKNOWN;
+ uint16_t ether_type;
+
+ eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+ ether_type = eth_hdr->ether_type;
+ if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
+ packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6))
+ packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+
+ m->packet_type = packet_type;
+}
+
+static uint16_t
+cb_parse_ptype(uint16_t port __rte_unused, uint16_t queue __rte_unused,
+ struct rte_mbuf *pkts[], uint16_t nb_pkts,
+ uint16_t max_pkts __rte_unused,
+ void *user_param __rte_unused)
+{
+ unsigned int i;
+
+ for (i = 0; i < nb_pkts; ++i)
+ parse_ptype_one(pkts[i]);
+
+ return nb_pkts;
+}
+
+static int
+add_cb_parse_ptype(uint16_t portid, uint16_t queueid)
+{
+	printf("Port %d: packet type will be parsed in software\n", portid);
+ if (rte_eth_add_rx_callback(portid, queueid, cb_parse_ptype, NULL))
+ return 0;
+
+ printf("Failed to add rx callback: port=%d\n", portid);
+ return -1;
+}
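+/*
+ * The callback registered above runs inside rte_eth_rx_burst() after the
+ * PMD has filled the burst, so m->packet_type is already valid by the time
+ * l3fwd_simple_forward() inspects it; the software fallback is transparent
+ * to the forwarding path.
+ */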
+
+static inline void
+l3fwd_simple_forward(struct rte_mbuf *m, uint16_t portid,
struct lcore_conf *qconf)
{
struct ether_hdr *eth_hdr;
struct ipv4_hdr *ipv4_hdr;
void *d_addr_bytes;
- uint8_t dst_port;
+ uint16_t dst_port;
eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
- if (m->ol_flags & PKT_RX_IPV4_HDR) {
+ if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
/* Handle IPv4 headers.*/
ipv4_hdr =
- (struct ipv4_hdr *)(rte_pktmbuf_mtod(m, unsigned char*)
- + sizeof(struct ether_hdr));
+ rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+ sizeof(struct ether_hdr));
#ifdef DO_RFC_1812_CHECKS
/* Check to make sure the packet is valid (RFC1812) */
ether_addr_copy(&ports_eth_addr[dst_port], ð_hdr->s_addr);
send_single_packet(m, dst_port);
- }
- else {
+ } else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
/* Handle IPv6 headers.*/
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
struct ipv6_hdr *ipv6_hdr;
ipv6_hdr =
- (struct ipv6_hdr *)(rte_pktmbuf_mtod(m, unsigned char*)
- + sizeof(struct ether_hdr));
+ rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
+ sizeof(struct ether_hdr));
dst_port = get_ipv6_dst_port(ipv6_hdr, portid,
qconf->ipv6_lookup_struct);
/* We don't currently handle IPv6 packets in LPM mode. */
rte_pktmbuf_free(m);
#endif
- }
+ } else
+ rte_pktmbuf_free(m);
}
-#define SLEEP_GEAR1_THRESHOLD 100
-#define SLEEP_GEAR2_THRESHOLD 1000
+#define MINIMUM_SLEEP_TIME 1
+#define SUSPEND_THRESHOLD 300
static inline uint32_t
power_idle_heuristic(uint32_t zero_rx_packet_count)
{
- /* If zero count is less than 100, use it as the sleep time in us */
- if (zero_rx_packet_count < SLEEP_GEAR1_THRESHOLD)
- return zero_rx_packet_count;
- /* If zero count is less than 1000, sleep time should be 100 us */
- else if ((zero_rx_packet_count >= SLEEP_GEAR1_THRESHOLD) &&
- (zero_rx_packet_count < SLEEP_GEAR2_THRESHOLD))
- return SLEEP_GEAR1_THRESHOLD;
- /* If zero count is greater than 1000, sleep time should be 1000 us */
- else if (zero_rx_packet_count >= SLEEP_GEAR2_THRESHOLD)
- return SLEEP_GEAR2_THRESHOLD;
-
- return 0;
+	/* If the zero-rx poll count is below SUSPEND_THRESHOLD, sleep the
+	 * minimum time (1 us) to stay responsive.
+	 */
+	if (zero_rx_packet_count < SUSPEND_THRESHOLD)
+		return MINIMUM_SLEEP_TIME;
+	/* Otherwise sleep for SUSPEND_THRESHOLD us, enough to cover the
+	 * wakeup latency from deep C-states (C3/C6) back to C0.
+	 */
+	else
+		return SUSPEND_THRESHOLD;
}
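+/*
+ * Resulting ladder (assuming the usual counting in main_loop(), where
+ * zero_rx_packet_count grows by one per empty poll once past
+ * MIN_ZERO_POLL_COUNT): an idle lcore first pauses 1 us per iteration and
+ * only after ~300 consecutive empty polls graduates to the 300 us hint,
+ * which main_loop() turns into an interrupt-backed suspend.
+ */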
static inline enum freq_scale_hint_t
power_freq_scaleup_heuristic(unsigned lcore_id,
- uint8_t port_id,
+ uint16_t port_id,
uint16_t queue_id)
{
+ uint32_t rxq_count = rte_eth_rx_queue_count(port_id, queue_id);
/**
-	 * HW Rx queue size is 128 by default, Rx burst read at maximum 32 entries
+	 * HW Rx queue size is 512 by default, Rx burst read at maximum 32 entries
* per iteration
#define FREQ_UP_TREND2_ACC 100
#define FREQ_UP_THRESHOLD 10000
- if (likely(rte_eth_rx_descriptor_done(port_id, queue_id,
- FREQ_GEAR3_RX_PACKET_THRESHOLD) > 0)) {
+ if (likely(rxq_count > FREQ_GEAR3_RX_PACKET_THRESHOLD)) {
stats[lcore_id].trend = 0;
return FREQ_HIGHEST;
- } else if (likely(rte_eth_rx_descriptor_done(port_id, queue_id,
- FREQ_GEAR2_RX_PACKET_THRESHOLD) > 0))
+ } else if (likely(rxq_count > FREQ_GEAR2_RX_PACKET_THRESHOLD))
stats[lcore_id].trend += FREQ_UP_TREND2_ACC;
- else if (likely(rte_eth_rx_descriptor_done(port_id, queue_id,
- FREQ_GEAR1_RX_PACKET_THRESHOLD) > 0))
+ else if (likely(rxq_count > FREQ_GEAR1_RX_PACKET_THRESHOLD))
stats[lcore_id].trend += FREQ_UP_TREND1_ACC;
if (likely(stats[lcore_id].trend > FREQ_UP_THRESHOLD)) {
return FREQ_CURRENT;
}
+/**
+ * Force the polling thread to sleep until a one-shot rx interrupt triggers.
+ * @param num
+ *  Number of rx queues (epoll events) to wait on.
+ * @return
+ *  0 on success
+ */
+static int
+sleep_until_rx_interrupt(int num)
+{
+ struct rte_epoll_event event[num];
+ int n, i;
+ uint16_t port_id;
+ uint8_t queue_id;
+ void *data;
+
+ RTE_LOG(INFO, L3FWD_POWER,
+ "lcore %u sleeps until interrupt triggers\n",
+ rte_lcore_id());
+
+ n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, event, num, -1);
+ for (i = 0; i < n; i++) {
+ data = event[i].epdata.data;
+ port_id = ((uintptr_t)data) >> CHAR_BIT;
+ queue_id = ((uintptr_t)data) &
+ RTE_LEN2MASK(CHAR_BIT, uint8_t);
+ rte_eth_dev_rx_intr_disable(port_id, queue_id);
+		RTE_LOG(INFO, L3FWD_POWER,
+			"lcore %u is woken up from rx interrupt on"
+			" port %d queue %d\n",
+			rte_lcore_id(), port_id, queue_id);
+ }
+
+ return 0;
+}
+
+static void turn_on_intr(struct lcore_conf *qconf)
+{
+ int i;
+ struct lcore_rx_queue *rx_queue;
+ uint8_t queue_id;
+ uint16_t port_id;
+
+ for (i = 0; i < qconf->n_rx_queue; ++i) {
+ rx_queue = &(qconf->rx_queue_list[i]);
+ port_id = rx_queue->port_id;
+ queue_id = rx_queue->queue_id;
+
+ rte_spinlock_lock(&(locks[port_id]));
+ rte_eth_dev_rx_intr_enable(port_id, queue_id);
+ rte_spinlock_unlock(&(locks[port_id]));
+ }
+}
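+/*
+ * The per-port spinlock serializes lcores that share a port:
+ * rte_eth_dev_rx_intr_enable() touches device-level interrupt state and is
+ * not guaranteed thread-safe across queues of the same port.
+ */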
+
+static int event_register(struct lcore_conf *qconf)
+{
+ struct lcore_rx_queue *rx_queue;
+ uint8_t queueid;
+ uint16_t portid;
+ uint32_t data;
+ int ret;
+ int i;
+
+ for (i = 0; i < qconf->n_rx_queue; ++i) {
+ rx_queue = &(qconf->rx_queue_list[i]);
+ portid = rx_queue->port_id;
+ queueid = rx_queue->queue_id;
+ data = portid << CHAR_BIT | queueid;
+
+ ret = rte_eth_dev_rx_intr_ctl_q(portid, queueid,
+ RTE_EPOLL_PER_THREAD,
+ RTE_INTR_EVENT_ADD,
+ (void *)((uintptr_t)data));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
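+/*
+ * The epoll user data packs (port, queue) as (portid << CHAR_BIT) | queueid,
+ * mirroring the decode in sleep_until_rx_interrupt(): e.g. port 3, queue 2
+ * gives data = 0x302, and 0x302 >> 8 = 3, 0x302 & 0xff = 2 recover the pair.
+ */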
+
/* main processing loop */
static int
main_loop(__attribute__((unused)) void *dummy)
{
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
unsigned lcore_id;
- uint64_t prev_tsc, diff_tsc, cur_tsc;
+ uint64_t prev_tsc, diff_tsc, cur_tsc, tim_res_tsc, hz;
uint64_t prev_tsc_power = 0, cur_tsc_power, diff_tsc_power;
int i, j, nb_rx;
- uint8_t portid, queueid;
+ uint8_t queueid;
+ uint16_t portid;
struct lcore_conf *qconf;
struct lcore_rx_queue *rx_queue;
enum freq_scale_hint_t lcore_scaleup_hint;
-
uint32_t lcore_rx_idle_count = 0;
uint32_t lcore_idle_hint = 0;
+ int intr_en = 0;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
prev_tsc = 0;
+ hz = rte_get_timer_hz();
+ tim_res_tsc = hz/TIMER_NUMBER_PER_SECOND;
lcore_id = rte_lcore_id();
qconf = &lcore_conf[lcore_id];
RTE_LOG(INFO, L3FWD_POWER, "entering main loop on lcore %u\n", lcore_id);
for (i = 0; i < qconf->n_rx_queue; i++) {
-
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
- RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%hhu "
+ RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%u "
"rxqueueid=%hhu\n", lcore_id, portid, queueid);
}
+ /* add into event wait list */
+ if (event_register(qconf) == 0)
+ intr_en = 1;
+ else
+		RTE_LOG(INFO, L3FWD_POWER, "RX interrupt won't be enabled.\n");
+
while (1) {
stats[lcore_id].nb_iteration_looped++;
*/
diff_tsc = cur_tsc - prev_tsc;
if (unlikely(diff_tsc > drain_tsc)) {
-
- /*
- * This could be optimized (use queueid instead of
- * portid), but it is not called so often
- */
- for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
- if (qconf->tx_mbufs[portid].len == 0)
- continue;
- send_burst(&lcore_conf[lcore_id],
- qconf->tx_mbufs[portid].len,
- portid);
- qconf->tx_mbufs[portid].len = 0;
+ for (i = 0; i < qconf->n_tx_port; ++i) {
+ portid = qconf->tx_port_id[i];
+ rte_eth_tx_buffer_flush(portid,
+ qconf->tx_queue_id[portid],
+ qconf->tx_buffer[portid]);
}
-
prev_tsc = cur_tsc;
}
diff_tsc_power = cur_tsc_power - prev_tsc_power;
- if (diff_tsc_power > TIMER_RESOLUTION_CYCLES) {
+ if (diff_tsc_power > tim_res_tsc) {
rte_timer_manage();
prev_tsc_power = cur_tsc_power;
}
+start_rx:
/*
* Read packet from RX queues
*/
nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst,
MAX_PKT_BURST);
+
stats[lcore_id].nb_rx_processed += nb_rx;
if (unlikely(nb_rx == 0)) {
/**
rx_queue->freq_up_hint =
power_freq_scaleup_heuristic(lcore_id,
portid, queueid);
- }
+ }
/* Prefetch first packets */
for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
rx_queue->freq_up_hint;
}
- if (lcore_scaleup_hint == FREQ_HIGHEST)
- rte_power_freq_max(lcore_id);
- else if (lcore_scaleup_hint == FREQ_HIGHER)
- rte_power_freq_up(lcore_id);
+ if (lcore_scaleup_hint == FREQ_HIGHEST) {
+ if (rte_power_freq_max)
+ rte_power_freq_max(lcore_id);
+ } else if (lcore_scaleup_hint == FREQ_HIGHER) {
+ if (rte_power_freq_up)
+ rte_power_freq_up(lcore_id);
+ }
} else {
/**
* All Rx queues empty in recent consecutive polls,
* sleep in a conservative manner, meaning sleep as
- * less as possible.
- */
+			 * little as possible.
+ */
for (i = 1, lcore_idle_hint =
qconf->rx_queue_list[0].idle_hint;
i < qconf->n_rx_queue; ++i) {
lcore_idle_hint = rx_queue->idle_hint;
}
- if ( lcore_idle_hint < SLEEP_GEAR1_THRESHOLD)
+ if (lcore_idle_hint < SUSPEND_THRESHOLD)
/**
* execute "pause" instruction to avoid context
- * switch for short sleep.
- */
+			 * switch, which generally takes hundreds of
+			 * microseconds, during short sleeps.
+ */
rte_delay_us(lcore_idle_hint);
- else
- /* long sleep force runing thread to suspend */
- usleep(lcore_idle_hint);
-
+ else {
+			/* suspend until rx interrupt triggers */
+ if (intr_en) {
+ turn_on_intr(qconf);
+ sleep_until_rx_interrupt(
+ qconf->n_rx_queue);
+ /**
+ * start receiving packets immediately
+ */
+ goto start_rx;
+ }
+ }
stats[lcore_id].sleep_time += lcore_idle_hint;
}
}
}
static uint8_t
-get_port_n_rx_queues(const uint8_t port)
+get_port_n_rx_queues(const uint16_t port)
{
int queue = -1;
uint16_t i;
" --config (port,queue,lcore): rx queues configuration\n"
" --no-numa: optional, disable numa awareness\n"
" --enable-jumbo: enable jumbo frame"
- " which max packet len is PKTLEN in decimal (64-9600)\n",
+ " which max packet len is PKTLEN in decimal (64-9600)\n"
+ " --parse-ptype: parse packet type by software\n",
prgname);
}
return 0;
}
+#define CMD_LINE_OPT_PARSE_PTYPE "parse-ptype"
+
/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{"config", 1, 0, 0},
{"no-numa", 0, 0, 0},
{"enable-jumbo", 0, 0, 0},
+ {CMD_LINE_OPT_PARSE_PTYPE, 0, 0, 0},
{NULL, 0, 0, 0}
};
(unsigned int)port_conf.rxmode.max_rx_pkt_len);
}
+ if (!strncmp(lgopts[option_index].name,
+ CMD_LINE_OPT_PARSE_PTYPE,
+ sizeof(CMD_LINE_OPT_PARSE_PTYPE))) {
+ printf("soft parse-ptype is enabled\n");
+ parse_ptype = 1;
+ }
+
break;
default:
argv[optind-1] = prgname;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
struct rte_hash_parameters ipv4_l3fwd_hash_params = {
.name = NULL,
.entries = L3FWD_HASH_ENTRIES,
- .bucket_entries = 4,
.key_len = sizeof(struct ipv4_5tuple),
.hash_func = DEFAULT_HASH_FUNC,
.hash_func_init_val = 0,
struct rte_hash_parameters ipv6_l3fwd_hash_params = {
.name = NULL,
.entries = L3FWD_HASH_ENTRIES,
- .bucket_entries = 4,
.key_len = sizeof(struct ipv6_5tuple),
.hash_func = DEFAULT_HASH_FUNC,
.hash_func_init_val = 0,
char s[64];
/* create the LPM table */
+ struct rte_lpm_config lpm_ipv4_config;
+
+ lpm_ipv4_config.max_rules = IPV4_L3FWD_LPM_MAX_RULES;
+ lpm_ipv4_config.number_tbl8s = 256;
+ lpm_ipv4_config.flags = 0;
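+	/*
+	 * number_tbl8s sizes the pool of second-level tbl8 tables used by
+	 * routes deeper than /24; 256 matches the fixed count the old
+	 * rte_lpm_create() signature reserved implicitly.
+	 */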
+
snprintf(s, sizeof(s), "IPV4_L3FWD_LPM_%d", socketid);
- ipv4_l3fwd_lookup_struct[socketid] = rte_lpm_create(s, socketid,
- IPV4_L3FWD_LPM_MAX_RULES, 0);
+ ipv4_l3fwd_lookup_struct[socketid] =
+ rte_lpm_create(s, socketid, &lpm_ipv4_config);
if (ipv4_l3fwd_lookup_struct[socketid] == NULL)
rte_exit(EXIT_FAILURE, "Unable to create the l3fwd LPM table"
" on socket %d\n", socketid);
snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
pktmbuf_pool[socketid] =
rte_pktmbuf_pool_create(s, nb_mbuf,
- MEMPOOL_CACHE_SIZE, 0, MBUF_DATA_SIZE,
+ MEMPOOL_CACHE_SIZE, 0,
+ RTE_MBUF_DEFAULT_BUF_SIZE,
socketid);
if (pktmbuf_pool[socketid] == NULL)
rte_exit(EXIT_FAILURE,
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint8_t count, all_ports_up, print_flag = 0;
+ uint16_t portid;
struct rte_eth_link link;
printf("\nChecking link status");
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == 0) {
+ if (link.link_status == ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
}
}
+static int check_ptype(uint16_t portid)
+{
+ int i, ret;
+ int ptype_l3_ipv4 = 0;
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
+ int ptype_l3_ipv6 = 0;
+#endif
+ uint32_t ptype_mask = RTE_PTYPE_L3_MASK;
+
+ ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, NULL, 0);
+ if (ret <= 0)
+ return 0;
+
+ uint32_t ptypes[ret];
+
+ ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, ptypes, ret);
+ for (i = 0; i < ret; ++i) {
+ if (ptypes[i] & RTE_PTYPE_L3_IPV4)
+ ptype_l3_ipv4 = 1;
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
+ if (ptypes[i] & RTE_PTYPE_L3_IPV6)
+ ptype_l3_ipv6 = 1;
+#endif
+ }
+
+ if (ptype_l3_ipv4 == 0)
+ printf("port %d cannot parse RTE_PTYPE_L3_IPV4\n", portid);
+
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
+ if (ptype_l3_ipv6 == 0)
+ printf("port %d cannot parse RTE_PTYPE_L3_IPV6\n", portid);
+#endif
+
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
+ if (ptype_l3_ipv4)
+#else /* APP_LOOKUP_EXACT_MATCH */
+ if (ptype_l3_ipv4 && ptype_l3_ipv6)
+#endif
+ return 1;
+
+	return 0;
+}
+
int
main(int argc, char **argv)
{
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf *txconf;
int ret;
- unsigned nb_ports;
+ uint16_t nb_ports;
uint16_t queueid;
unsigned lcore_id;
uint64_t hz;
uint32_t n_tx_queue, nb_lcores;
- uint8_t portid, nb_rx_queue, queue, socketid;
+ uint32_t dev_rxq_num, dev_txq_num;
+ uint8_t nb_rx_queue, queue, socketid;
+ uint16_t portid;
+ uint16_t org_rxq_intr = port_conf.intr_conf.rxq;
/* catch SIGINT and restore cpufreq governor to ondemand */
signal(SIGINT, signal_exit_now);
if (ret < 0)
rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
-
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
if (check_port_config(nb_ports) < 0)
rte_exit(EXIT_FAILURE, "check_port_config failed\n");
printf("Initializing port %d ... ", portid );
fflush(stdout);
+ rte_eth_dev_info_get(portid, &dev_info);
+ dev_rxq_num = dev_info.max_rx_queues;
+ dev_txq_num = dev_info.max_tx_queues;
+
nb_rx_queue = get_port_n_rx_queues(portid);
+ if (nb_rx_queue > dev_rxq_num)
+			rte_exit(EXIT_FAILURE,
+				"Cannot configure non-existent rxq: "
+				"port=%d\n", portid);
+
n_tx_queue = nb_lcores;
- if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
- n_tx_queue = MAX_TX_QUEUE_PER_PORT;
+ if (n_tx_queue > dev_txq_num)
+ n_tx_queue = dev_txq_num;
printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
nb_rx_queue, (unsigned)n_tx_queue );
+		/* If the number of Rx queues is 0, no need to enable Rx interrupt */
+ if (nb_rx_queue == 0)
+ port_conf.intr_conf.rxq = 0;
ret = rte_eth_dev_configure(portid, nb_rx_queue,
(uint16_t)n_tx_queue, &port_conf);
+ /* Revert to original value */
+ port_conf.intr_conf.rxq = org_rxq_intr;
if (ret < 0)
rte_exit(EXIT_FAILURE, "Cannot configure device: "
"err=%d, port=%d\n", ret, portid);
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot adjust number of descriptors: err=%d, port=%d\n",
+ ret, portid);
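+		/*
+		 * rte_eth_dev_adjust_nb_rx_tx_desc() clamps the requested ring
+		 * sizes to the PMD's min/max and alignment limits, so nb_rxd
+		 * and nb_txd may come back different from the 512 defaults.
+		 */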
+
rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
print_ethaddr(" Address:", &ports_eth_addr[portid]);
printf(", ");
if (ret < 0)
rte_exit(EXIT_FAILURE, "init_mem failed\n");
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ if (rte_lcore_is_enabled(lcore_id) == 0)
+ continue;
+
+ /* Initialize TX buffers */
+ qconf = &lcore_conf[lcore_id];
+ qconf->tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
+ RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0,
+ rte_eth_dev_socket_id(portid));
+ if (qconf->tx_buffer[portid] == NULL)
+ rte_exit(EXIT_FAILURE, "Can't allocate tx buffer for port %u\n",
+ portid);
+
+ rte_eth_tx_buffer_init(qconf->tx_buffer[portid], MAX_PKT_BURST);
+ }
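+	/*
+	 * rte_eth_tx_buffer_init() installs the default error callback, which
+	 * frees any packets the NIC refused; an application wanting drop
+	 * counters could swap it via rte_eth_tx_buffer_set_err_callback().
+	 */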
+
/* init one TX queue per couple (lcore,port) */
queueid = 0;
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
if (rte_lcore_is_enabled(lcore_id) == 0)
continue;
+ if (queueid >= dev_txq_num)
+ continue;
+
if (numa_on)
socketid = \
(uint8_t)rte_lcore_to_socket_id(lcore_id);
qconf = &lcore_conf[lcore_id];
qconf->tx_queue_id[portid] = queueid;
queueid++;
+
+ qconf->tx_port_id[qconf->n_tx_port] = portid;
+ qconf->n_tx_port++;
}
printf("\n");
}
/* init power management library */
ret = rte_power_init(lcore_id);
if (ret)
- rte_exit(EXIT_FAILURE, "Power management library "
- "initialization failed on core%u\n", lcore_id);
+ RTE_LOG(ERR, POWER,
+ "Library initialization failed on core %u\n", lcore_id);
/* init timer structures for each enabled lcore */
rte_timer_init(&power_timers[lcore_id]);
rte_exit(EXIT_FAILURE,
"rte_eth_rx_queue_setup: err=%d, "
"port=%d\n", ret, portid);
+
+ if (parse_ptype) {
+ if (add_cb_parse_ptype(portid, queueid) < 0)
+				rte_exit(EXIT_FAILURE,
+					"Failed to add ptype callback\n");
+ } else if (!check_ptype(portid))
+			rte_exit(EXIT_FAILURE,
+				"PMD cannot provide needed ptypes\n");
}
}
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, "
"port=%d\n", ret, portid);
-
/*
* If enabled, put device in promiscuous mode.
* This allows IO forwarding mode to forward packets
*/
if (promiscuous_on)
rte_eth_promiscuous_enable(portid);
+ /* initialize spinlock for each port */
+ rte_spinlock_init(&(locks[portid]));
}
- check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
+ check_all_ports_link_status(nb_ports, enabled_port_mask);
/* launch per-lcore init on every lcore */
rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);