APP_MODE_DEFAULT = 0,
APP_MODE_LEGACY,
APP_MODE_EMPTY_POLL,
- APP_MODE_TELEMETRY
+ APP_MODE_TELEMETRY,
+ APP_MODE_INTERRUPT
};
enum appmode app_mode;
return 0;
}
+
+/* main processing loop */
+static int
+main_intr_loop(__rte_unused void *dummy)
+{
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ unsigned int lcore_id;
+ uint64_t prev_tsc, diff_tsc, cur_tsc;
+ int i, j, nb_rx;
+ uint8_t queueid;
+ uint16_t portid;
+ struct lcore_conf *qconf;
+ struct lcore_rx_queue *rx_queue;
+ uint32_t lcore_rx_idle_count = 0;
+ uint32_t lcore_idle_hint = 0;
+ int intr_en = 0;
+
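+	/*
+	 * Number of TSC cycles in BURST_TX_DRAIN_US microseconds: the
+	 * per-microsecond cycle count is rounded up so the drain
+	 * interval is never shorter than requested.
+	 */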
+ const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
+ US_PER_S * BURST_TX_DRAIN_US;
+
+ prev_tsc = 0;
+
+ lcore_id = rte_lcore_id();
+ qconf = &lcore_conf[lcore_id];
+
+ if (qconf->n_rx_queue == 0) {
+ RTE_LOG(INFO, L3FWD_POWER, "lcore %u has nothing to do\n",
+ lcore_id);
+ return 0;
+ }
+
+ RTE_LOG(INFO, L3FWD_POWER, "entering main interrupt loop on lcore %u\n",
+ lcore_id);
+
+ for (i = 0; i < qconf->n_rx_queue; i++) {
+ portid = qconf->rx_queue_list[i].port_id;
+ queueid = qconf->rx_queue_list[i].queue_id;
+ RTE_LOG(INFO, L3FWD_POWER,
+ " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ lcore_id, portid, queueid);
+ }
+
+	/*
+	 * Add Rx queues to the event wait list; if registration fails,
+	 * stay in busy-polling mode.
+	 */
+ if (event_register(qconf) == 0)
+ intr_en = 1;
+ else
+ RTE_LOG(INFO, L3FWD_POWER, "RX interrupt won't enable.\n");
+
+ while (!is_done()) {
+ stats[lcore_id].nb_iteration_looped++;
+
+ cur_tsc = rte_rdtsc();
+
+		/*
+		 * TX burst queue drain: flush buffered TX packets once
+		 * drain_tsc cycles have passed since the last flush.
+		 */
+ diff_tsc = cur_tsc - prev_tsc;
+ if (unlikely(diff_tsc > drain_tsc)) {
+ for (i = 0; i < qconf->n_tx_port; ++i) {
+ portid = qconf->tx_port_id[i];
+ rte_eth_tx_buffer_flush(portid,
+ qconf->tx_queue_id[portid],
+ qconf->tx_buffer[portid]);
+ }
+ prev_tsc = cur_tsc;
+ }
+
+start_rx:
+ /*
+ * Read packet from RX queues
+ */
+ lcore_rx_idle_count = 0;
+ for (i = 0; i < qconf->n_rx_queue; ++i) {
+ rx_queue = &(qconf->rx_queue_list[i]);
+ rx_queue->idle_hint = 0;
+ portid = rx_queue->port_id;
+ queueid = rx_queue->queue_id;
+
+ nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst,
+ MAX_PKT_BURST);
+
+ stats[lcore_id].nb_rx_processed += nb_rx;
+ if (unlikely(nb_rx == 0)) {
+				/**
+				 * No packets received from this Rx queue;
+				 * try to sleep for a while, forcing the CPU
+				 * to enter deeper C-states.
+				 */
+ rx_queue->zero_rx_packet_count++;
+
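+				/*
+				 * Ignore the first MIN_ZERO_POLL_COUNT empty
+				 * polls so brief gaps in traffic do not
+				 * trigger the idle heuristic.
+				 */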
+ if (rx_queue->zero_rx_packet_count <=
+ MIN_ZERO_POLL_COUNT)
+ continue;
+
+ rx_queue->idle_hint = power_idle_heuristic(
+ rx_queue->zero_rx_packet_count);
+ lcore_rx_idle_count++;
+ } else {
+ rx_queue->zero_rx_packet_count = 0;
+ }
+
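+			/*
+			 * Software-pipelined forwarding: prefetch the first
+			 * PREFETCH_OFFSET packets, then forward each packet
+			 * while prefetching the one PREFETCH_OFFSET ahead,
+			 * hiding memory latency behind forwarding work.
+			 */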
+ /* Prefetch first packets */
+ for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
+ rte_prefetch0(rte_pktmbuf_mtod(
+ pkts_burst[j], void *));
+ }
+
+ /* Prefetch and forward already prefetched packets */
+ for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
+ rte_prefetch0(rte_pktmbuf_mtod(
+ pkts_burst[j + PREFETCH_OFFSET],
+ void *));
+ l3fwd_simple_forward(
+ pkts_burst[j], portid, qconf);
+ }
+
+ /* Forward remaining prefetched packets */
+ for (; j < nb_rx; j++) {
+ l3fwd_simple_forward(
+ pkts_burst[j], portid, qconf);
+ }
+ }
+
+ if (unlikely(lcore_rx_idle_count == qconf->n_rx_queue)) {
+			/**
+			 * All Rx queues have been empty for several
+			 * consecutive polls; sleep conservatively, i.e.
+			 * as little as possible, using the smallest idle
+			 * hint reported by any queue.
+			 */
+ for (i = 1,
+ lcore_idle_hint = qconf->rx_queue_list[0].idle_hint;
+ i < qconf->n_rx_queue; ++i) {
+ rx_queue = &(qconf->rx_queue_list[i]);
+ if (rx_queue->idle_hint < lcore_idle_hint)
+ lcore_idle_hint = rx_queue->idle_hint;
+ }
+
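+			/*
+			 * Hints below SUSPEND_THRESHOLD are served with a
+			 * short busy-wait; longer hints justify the cost of
+			 * arming Rx interrupts and sleeping until one fires.
+			 */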
+ if (lcore_idle_hint < SUSPEND_THRESHOLD)
+			/**
+			 * For a short sleep, spin with the "pause"
+			 * instruction rather than yielding: a context
+			 * switch generally costs hundreds of
+			 * microseconds.
+			 */
+ rte_delay_us(lcore_idle_hint);
+ else {
+ /* suspend until rx interrupt triggers */
+ if (intr_en) {
+ turn_on_off_intr(qconf, 1);
+ sleep_until_rx_interrupt(
+ qconf->n_rx_queue);
+ turn_on_off_intr(qconf, 0);
+ /**
+ * start receiving packets immediately
+ */
+ if (likely(!is_done()))
+ goto start_rx;
+ }
+ }
+ stats[lcore_id].sleep_time += lcore_idle_hint;
+ }
+ }
+
+ return 0;
+}
+
/* main processing loop */
static int
main_telemetry_loop(__rte_unused void *dummy)
" --empty-poll: enable empty poll detection"
" follow (training_flag, high_threshold, med_threshold)\n"
" --telemetry: enable telemetry mode, to update"
- " empty polls, full polls, and core busyness to telemetry\n",
+ " empty polls, full polls, and core busyness to telemetry\n"
+ " --interrupt-only: enable interrupt-only mode\n",
prgname);
}
#define CMD_LINE_OPT_PARSE_PTYPE "parse-ptype"
#define CMD_LINE_OPT_LEGACY "legacy"
#define CMD_LINE_OPT_EMPTY_POLL "empty-poll"
+#define CMD_LINE_OPT_INTERRUPT_ONLY "interrupt-only"
#define CMD_LINE_OPT_TELEMETRY "telemetry"
/* Parse the argument given in the command line of the application */
{CMD_LINE_OPT_PARSE_PTYPE, 0, 0, 0},
{CMD_LINE_OPT_LEGACY, 0, 0, 0},
{CMD_LINE_OPT_TELEMETRY, 0, 0, 0},
+ {CMD_LINE_OPT_INTERRUPT_ONLY, 0, 0, 0},
{NULL, 0, 0, 0}
};
printf("telemetry mode is enabled\n");
}
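+			/*
+			 * sizeof() counts the terminating NUL, so strncmp()
+			 * here only matches the exact option name.
+			 */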
+ if (!strncmp(lgopts[option_index].name,
+ CMD_LINE_OPT_INTERRUPT_ONLY,
+ sizeof(CMD_LINE_OPT_INTERRUPT_ONLY))) {
+ if (app_mode != APP_MODE_DEFAULT) {
+ printf(" interrupt-only mode is mutually exclusive with other modes\n");
+ return -1;
+ }
+ app_mode = APP_MODE_INTERRUPT;
+ printf("interrupt-only mode is enabled\n");
+ }
+
if (!strncmp(lgopts[option_index].name,
"enable-jumbo", 12)) {
struct option lenopts =
RTE_ETH_FOREACH_DEV(portid) {
struct rte_eth_conf local_port_conf = port_conf;
/* not all app modes need interrupts */
- bool need_intr = app_mode == APP_MODE_LEGACY;
+ bool need_intr = app_mode == APP_MODE_LEGACY ||
+ app_mode == APP_MODE_INTERRUPT;
/* skip ports that are not enabled */
if ((enabled_port_mask & (1 << portid)) == 0) {
"Returns global power stats. Parameters: None");
rte_eal_mp_remote_launch(main_telemetry_loop, NULL,
SKIP_MASTER);
+ } else if (app_mode == APP_MODE_INTERRUPT) {
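+		/*
+		 * The master lcore has no dedicated task in this mode, so
+		 * it runs the interrupt loop too (CALL_MASTER, not
+		 * SKIP_MASTER as in telemetry mode).
+		 */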
+ rte_eal_mp_remote_launch(main_intr_loop, NULL, CALL_MASTER);
}
if (app_mode == APP_MODE_EMPTY_POLL || app_mode == APP_MODE_TELEMETRY)