/**< disabled by default */
enum appmode {
- APP_MODE_LEGACY = 0,
+ APP_MODE_DEFAULT = 0, /**< no mode requested; resolved at startup via autodetect */
+ APP_MODE_LEGACY, /**< frequency-scaling mode using the power library */
APP_MODE_EMPTY_POLL,
- APP_MODE_TELEMETRY
+ APP_MODE_TELEMETRY,
+ APP_MODE_INTERRUPT /**< rx-interrupt sleep mode, no frequency scaling */
};
enum appmode app_mode;
* 0 on success
*/
static int
-sleep_until_rx_interrupt(int num)
+sleep_until_rx_interrupt(int num, int lcore)
{
/*
 * we want to track when we are woken up by traffic so that we can go
- * back to sleep again without log spamming.
+ * back to sleep again without log spamming. Avoid cache line sharing
+ * to prevent threads stepping on each others' toes.
 */
- static bool timeout;
+ static struct {
+ bool wakeup;
+ } __rte_cache_aligned status[RTE_MAX_LCORE];
struct rte_epoll_event event[num];
int n, i;
uint16_t port_id;
uint8_t queue_id;
void *data;
- if (!timeout) {
+ /* only log when the previous wait returned events; avoids spamming
+  * the log on every timeout-and-resleep cycle */
+ if (status[lcore].wakeup) {
RTE_LOG(INFO, L3FWD_POWER,
"lcore %u sleeps until interrupt triggers\n",
rte_lcore_id());
" port %d queue %d\n",
rte_lcore_id(), port_id, queue_id);
}
- timeout = n == 0;
+ /* n != 0 means we were woken by traffic rather than by timeout */
+ status[lcore].wakeup = n != 0;
return 0;
}
return 0;
}
+
+/*
+ * Main processing loop for interrupt-only mode: burst-poll every Rx
+ * queue assigned to this lcore and forward what arrives; once all
+ * queues have been idle for long enough, either pause briefly or arm
+ * Rx interrupts and sleep until traffic wakes the core up.
+ */
+static int main_intr_loop(__rte_unused void *dummy)
+{
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ unsigned int lcore_id;
+ uint64_t prev_tsc, diff_tsc, cur_tsc;
+ int i, j, nb_rx;
+ uint8_t queueid;
+ uint16_t portid;
+ struct lcore_conf *qconf;
+ struct lcore_rx_queue *rx_queue;
+ uint32_t lcore_rx_idle_count = 0;
+ uint32_t lcore_idle_hint = 0;
+ int intr_en = 0;
+
+ const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
+ US_PER_S * BURST_TX_DRAIN_US;
+
+ prev_tsc = 0;
+
+ lcore_id = rte_lcore_id();
+ qconf = &lcore_conf[lcore_id];
+
+ if (qconf->n_rx_queue == 0) {
+ RTE_LOG(INFO, L3FWD_POWER, "lcore %u has nothing to do\n",
+ lcore_id);
+ return 0;
+ }
+
+ RTE_LOG(INFO, L3FWD_POWER, "entering main interrupt loop on lcore %u\n",
+ lcore_id);
+
+ for (i = 0; i < qconf->n_rx_queue; i++) {
+ portid = qconf->rx_queue_list[i].port_id;
+ queueid = qconf->rx_queue_list[i].queue_id;
+ RTE_LOG(INFO, L3FWD_POWER,
+ " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ lcore_id, portid, queueid);
+ }
+
+ /* add into event wait list */
+ if (event_register(qconf) == 0)
+ intr_en = 1;
+ else
+ RTE_LOG(INFO, L3FWD_POWER, "RX interrupt won't enable.\n");
+
+ while (!is_done()) {
+ stats[lcore_id].nb_iteration_looped++;
+
+ cur_tsc = rte_rdtsc();
+
+ /*
+ * TX burst queue drain
+ */
+ diff_tsc = cur_tsc - prev_tsc;
+ if (unlikely(diff_tsc > drain_tsc)) {
+ for (i = 0; i < qconf->n_tx_port; ++i) {
+ portid = qconf->tx_port_id[i];
+ rte_eth_tx_buffer_flush(portid,
+ qconf->tx_queue_id[portid],
+ qconf->tx_buffer[portid]);
+ }
+ prev_tsc = cur_tsc;
+ }
+
+start_rx:
+ /*
+ * Read packet from RX queues
+ */
+ lcore_rx_idle_count = 0;
+ for (i = 0; i < qconf->n_rx_queue; ++i) {
+ rx_queue = &(qconf->rx_queue_list[i]);
+ rx_queue->idle_hint = 0;
+ portid = rx_queue->port_id;
+ queueid = rx_queue->queue_id;
+
+ nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst,
+ MAX_PKT_BURST);
+
+ stats[lcore_id].nb_rx_processed += nb_rx;
+ if (unlikely(nb_rx == 0)) {
+ /**
+ * no packet received from rx queue, try to
+ * sleep for a while forcing CPU enter deeper
+ * C states.
+ */
+ rx_queue->zero_rx_packet_count++;
+
+ if (rx_queue->zero_rx_packet_count <=
+ MIN_ZERO_POLL_COUNT)
+ continue;
+
+ rx_queue->idle_hint = power_idle_heuristic(
+ rx_queue->zero_rx_packet_count);
+ lcore_rx_idle_count++;
+ } else {
+ rx_queue->zero_rx_packet_count = 0;
+ }
+
+ /* Prefetch first packets */
+ for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
+ rte_prefetch0(rte_pktmbuf_mtod(
+ pkts_burst[j], void *));
+ }
+
+ /* Prefetch and forward already prefetched packets */
+ for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
+ rte_prefetch0(rte_pktmbuf_mtod(
+ pkts_burst[j + PREFETCH_OFFSET],
+ void *));
+ l3fwd_simple_forward(
+ pkts_burst[j], portid, qconf);
+ }
+
+ /* Forward remaining prefetched packets */
+ for (; j < nb_rx; j++) {
+ l3fwd_simple_forward(
+ pkts_burst[j], portid, qconf);
+ }
+ }
+
+ if (unlikely(lcore_rx_idle_count == qconf->n_rx_queue)) {
+ /**
+ * All Rx queues empty in recent consecutive polls,
+ * sleep in a conservative manner, meaning sleep as
+ * little as possible.
+ */
+ for (i = 1,
+ lcore_idle_hint = qconf->rx_queue_list[0].idle_hint;
+ i < qconf->n_rx_queue; ++i) {
+ rx_queue = &(qconf->rx_queue_list[i]);
+ if (rx_queue->idle_hint < lcore_idle_hint)
+ lcore_idle_hint = rx_queue->idle_hint;
+ }
+
+ if (lcore_idle_hint < SUSPEND_THRESHOLD)
+ /**
+ * execute "pause" instruction to avoid a context
+ * switch, which generally takes hundreds of
+ * microseconds, for a short sleep.
+ */
+ rte_delay_us(lcore_idle_hint);
+ else {
+ /* suspend until rx interrupt triggers */
+ if (intr_en) {
+ turn_on_off_intr(qconf, 1);
+ sleep_until_rx_interrupt(
+ qconf->n_rx_queue,
+ lcore_id);
+ turn_on_off_intr(qconf, 0);
+ /**
+ * start receiving packets immediately
+ */
+ if (likely(!is_done()))
+ goto start_rx;
+ }
+ }
+ stats[lcore_id].sleep_time += lcore_idle_hint;
+ }
+ }
+
+ return 0;
+}
+
/* main processing loop */
static int
main_telemetry_loop(__rte_unused void *dummy)
}
/* main processing loop */
static int
-main_loop(__rte_unused void *dummy)
+main_legacy_loop(__rte_unused void *dummy)
{
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
unsigned lcore_id;
if (intr_en) {
turn_on_off_intr(qconf, 1);
sleep_until_rx_interrupt(
- qconf->n_rx_queue);
+ qconf->n_rx_queue,
+ lcore_id);
turn_on_off_intr(qconf, 0);
/**
* start receiving packets immediately
"off\n", lcore, socketid);
}
if (app_mode == APP_MODE_TELEMETRY && lcore == rte_lcore_id()) {
- printf("cannot enable master core %d in config for telemetry mode\n",
+ printf("cannot enable main core %d in config for telemetry mode\n",
rte_lcore_id());
return -1;
}
" --enable-jumbo: enable jumbo frame"
" which max packet len is PKTLEN in decimal (64-9600)\n"
" --parse-ptype: parse packet type by software\n"
+ " --legacy: use legacy interrupt-based scaling\n"
" --empty-poll: enable empty poll detection"
" follow (training_flag, high_threshold, med_threshold)\n"
" --telemetry: enable telemetry mode, to update"
- " empty polls, full polls, and core busyness to telemetry\n",
+ " empty polls, full polls, and core busyness to telemetry\n"
+ " --interrupt-only: enable interrupt-only mode\n",
prgname);
}
/* parse hexadecimal string */
pm = strtoul(portmask, &end, 16);
if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
- return -1;
-
- if (pm == 0)
- return -1;
+ return 0;
return pm;
}
}
#define CMD_LINE_OPT_PARSE_PTYPE "parse-ptype"
+#define CMD_LINE_OPT_LEGACY "legacy"
+#define CMD_LINE_OPT_EMPTY_POLL "empty-poll"
+#define CMD_LINE_OPT_INTERRUPT_ONLY "interrupt-only"
#define CMD_LINE_OPT_TELEMETRY "telemetry"
/* Parse the argument given in the command line of the application */
{"high-perf-cores", 1, 0, 0},
{"no-numa", 0, 0, 0},
{"enable-jumbo", 0, 0, 0},
- {"empty-poll", 1, 0, 0},
+ {CMD_LINE_OPT_EMPTY_POLL, 1, 0, 0},
{CMD_LINE_OPT_PARSE_PTYPE, 0, 0, 0},
+ {CMD_LINE_OPT_LEGACY, 0, 0, 0},
{CMD_LINE_OPT_TELEMETRY, 0, 0, 0},
+ {CMD_LINE_OPT_INTERRUPT_ONLY, 0, 0, 0},
{NULL, 0, 0, 0}
};
}
if (!strncmp(lgopts[option_index].name,
- "empty-poll", 10)) {
- if (app_mode == APP_MODE_TELEMETRY) {
- printf(" empty-poll cannot be enabled as telemetry mode is enabled\n");
+ CMD_LINE_OPT_LEGACY,
+ sizeof(CMD_LINE_OPT_LEGACY))) {
+ if (app_mode != APP_MODE_DEFAULT) {
+ printf(" legacy mode is mutually exclusive with other modes\n");
+ return -1;
+ }
+ app_mode = APP_MODE_LEGACY;
+ printf("legacy mode is enabled\n");
+ }
+
+ if (!strncmp(lgopts[option_index].name,
+ CMD_LINE_OPT_EMPTY_POLL, 10)) {
+ if (app_mode != APP_MODE_DEFAULT) {
+ printf(" empty-poll mode is mutually exclusive with other modes\n");
return -1;
}
app_mode = APP_MODE_EMPTY_POLL;
if (!strncmp(lgopts[option_index].name,
CMD_LINE_OPT_TELEMETRY,
sizeof(CMD_LINE_OPT_TELEMETRY))) {
- if (app_mode == APP_MODE_EMPTY_POLL) {
- printf("telemetry mode cannot be enabled as empty poll mode is enabled\n");
+ if (app_mode != APP_MODE_DEFAULT) {
+ printf(" telemetry mode is mutually exclusive with other modes\n");
return -1;
}
app_mode = APP_MODE_TELEMETRY;
printf("telemetry mode is enabled\n");
}
+ if (!strncmp(lgopts[option_index].name,
+ CMD_LINE_OPT_INTERRUPT_ONLY,
+ sizeof(CMD_LINE_OPT_INTERRUPT_ONLY))) {
+ if (app_mode != APP_MODE_DEFAULT) {
+ printf(" interrupt-only mode is mutually exclusive with other modes\n");
+ return -1;
+ }
+ app_mode = APP_MODE_INTERRUPT;
+ printf("interrupt-only mode is enabled\n");
+ }
+
if (!strncmp(lgopts[option_index].name,
"enable-jumbo", 12)) {
struct option lenopts =
uint16_t portid;
struct rte_eth_link link;
int ret;
+ char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];
printf("\nChecking link status");
fflush(stdout);
}
/* print link status if flag set */
if (print_flag == 1) {
- if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
- (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
- ("full-duplex") : ("half-duplex"));
- else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ rte_eth_link_to_str(link_status_text,
+ sizeof(link_status_text), &link);
+ printf("Port %d %s\n", portid,
+ link_status_text);
continue;
}
/* clear all_ports_up flag if any link down */
static int
init_power_library(void)
{
+ enum power_management_env env;
unsigned int lcore_id;
int ret = 0;
lcore_id);
return ret;
}
+ /* we're not supporting the VM channel mode */
+ env = rte_power_get_env();
+ if (env != PM_ENV_ACPI_CPUFREQ &&
+ env != PM_ENV_PSTATE_CPUFREQ) {
+ RTE_LOG(ERR, POWER,
+ "Only ACPI and PSTATE mode are supported\n");
+ return -1;
+ }
}
return ret;
}
uint64_t app_eps = 0, app_fps = 0, app_br = 0;
uint64_t count = 0;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
qconf = &lcore_conf[lcore_id];
if (qconf->n_rx_queue == 0)
continue;
RTE_SET_USED(lcore_id);
- if (rte_get_master_lcore() != lcore_id) {
- rte_panic("timer on lcore:%d which is not master core:%d\n",
+ if (rte_get_main_lcore() != lcore_id) {
+ rte_panic("timer on lcore:%d which is not main core:%d\n",
lcore_id,
- rte_get_master_lcore());
+ rte_get_main_lcore());
}
RTE_LOG(INFO, POWER, "Bring up the Timer\n");
return 0;
}
+/*
+ * Choose an operating mode when none was given on the command line:
+ * legacy (frequency-scaling) mode if ACPI or pstate cpufreq support is
+ * detected, interrupt-only mode otherwise. Returns an enum appmode value.
+ */
+static int
+autodetect_mode(void)
+{
+ RTE_LOG(NOTICE, L3FWD_POWER, "Operating mode not specified, probing frequency scaling support...\n");
+
+ /*
+ * Empty poll and telemetry modes have to be specifically requested to
+ * be enabled, but we can auto-detect between interrupt mode with or
+ * without frequency scaling. Both ACPI and pstate can be used.
+ */
+ if (rte_power_check_env_supported(PM_ENV_ACPI_CPUFREQ))
+ return APP_MODE_LEGACY;
+ if (rte_power_check_env_supported(PM_ENV_PSTATE_CPUFREQ))
+ return APP_MODE_LEGACY;
+
+ RTE_LOG(NOTICE, L3FWD_POWER, "Frequency scaling not supported, selecting interrupt-only mode\n");
+
+ return APP_MODE_INTERRUPT;
+}
+
+/* Map an appmode value to a human-readable name for logging. */
+static const char *
+mode_to_str(enum appmode mode)
+{
+ switch (mode) {
+ case APP_MODE_LEGACY:
+ return "legacy";
+ case APP_MODE_EMPTY_POLL:
+ return "empty poll";
+ case APP_MODE_TELEMETRY:
+ return "telemetry";
+ case APP_MODE_INTERRUPT:
+ return "interrupt-only";
+ default:
+ return "invalid";
+ }
+}
int
main(int argc, char **argv)
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n");
- if (app_mode != APP_MODE_TELEMETRY && init_power_library())
+ if (app_mode == APP_MODE_DEFAULT)
+ app_mode = autodetect_mode();
+
+ RTE_LOG(INFO, L3FWD_POWER, "Selected operation mode: %s\n",
+ mode_to_str(app_mode));
+
+ /* only legacy and empty poll mode rely on power library */
+ if ((app_mode == APP_MODE_LEGACY || app_mode == APP_MODE_EMPTY_POLL) &&
+ init_power_library())
rte_exit(EXIT_FAILURE, "init_power_library failed\n");
if (update_lcore_params() < 0)
RTE_ETH_FOREACH_DEV(portid) {
struct rte_eth_conf local_port_conf = port_conf;
/* not all app modes need interrupts */
- bool need_intr = app_mode == APP_MODE_LEGACY;
+ bool need_intr = app_mode == APP_MODE_LEGACY ||
+ app_mode == APP_MODE_INTERRUPT;
/* skip ports that are not enabled */
if ((enabled_port_mask & (1 << portid)) == 0) {
if (add_cb_parse_ptype(portid, queueid) < 0)
rte_exit(EXIT_FAILURE,
"Fail to add ptype cb\n");
- } else if (!check_ptype(portid))
- rte_exit(EXIT_FAILURE,
- "PMD can not provide needed ptypes\n");
+ }
}
}
}
/* initialize spinlock for each port */
rte_spinlock_init(&(locks[portid]));
+
+ if (!parse_ptype)
+ if (!check_ptype(portid))
+ rte_exit(EXIT_FAILURE,
+ "PMD can not provide needed ptypes\n");
}
check_all_ports_link_status(enabled_port_mask);
/* launch per-lcore init on every lcore */
if (app_mode == APP_MODE_LEGACY) {
- rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
+ rte_eal_mp_remote_launch(main_legacy_loop, NULL, CALL_MAIN);
} else if (app_mode == APP_MODE_EMPTY_POLL) {
empty_poll_stop = false;
rte_eal_mp_remote_launch(main_empty_poll_loop, NULL,
- SKIP_MASTER);
- } else {
+ SKIP_MAIN);
+ } else if (app_mode == APP_MODE_TELEMETRY) {
unsigned int i;
/* Init metrics library */
else
rte_exit(EXIT_FAILURE, "failed to register metrics names");
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
rte_spinlock_init(&stats[lcore_id].telemetry_lock);
}
rte_timer_init(&telemetry_timer);
handle_app_stats,
"Returns global power stats. Parameters: None");
rte_eal_mp_remote_launch(main_telemetry_loop, NULL,
- SKIP_MASTER);
+ SKIP_MAIN);
+ } else if (app_mode == APP_MODE_INTERRUPT) {
+ rte_eal_mp_remote_launch(main_intr_loop, NULL, CALL_MAIN);
}
if (app_mode == APP_MODE_EMPTY_POLL || app_mode == APP_MODE_TELEMETRY)
launch_timer(rte_lcore_id());
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
return -1;
}
if ((enabled_port_mask & (1 << portid)) == 0)
continue;
- rte_eth_dev_stop(portid);
+ ret = rte_eth_dev_stop(portid);
+ if (ret != 0)
+ RTE_LOG(ERR, L3FWD_POWER, "rte_eth_dev_stop: err=%d, port=%u\n",
+ ret, portid);
+
rte_eth_dev_close(portid);
}
if (app_mode == APP_MODE_EMPTY_POLL)
rte_power_empty_poll_stat_free();
- if (app_mode != APP_MODE_TELEMETRY && deinit_power_library())
+ if ((app_mode == APP_MODE_LEGACY || app_mode == APP_MODE_EMPTY_POLL) &&
+ deinit_power_library())
rte_exit(EXIT_FAILURE, "deinit_power_library failed\n");
if (rte_eal_cleanup() < 0)