+ /*
+ * Read packet from RX queues
+ */
+ for (i = 0; i < qconf->n_rx_queue; ++i) {
+ rx_queue = &(qconf->rx_queue_list[i]);
+ portid = rx_queue->port_id;
+ queueid = rx_queue->queue_id;
+
+ nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst,
+ MAX_PKT_BURST);
+ ep_nep[nb_rx == 0]++;
+ fp_nfp[nb_rx == MAX_PKT_BURST]++;
+ poll_count++;
+ if (unlikely(nb_rx == 0))
+ continue;
+
+ /* Prefetch first packets */
+ for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
+ rte_prefetch0(rte_pktmbuf_mtod(
+ pkts_burst[j], void *));
+ }
+
+ /* Prefetch and forward already prefetched packets */
+ for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
+ rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
+ j + PREFETCH_OFFSET], void *));
+ l3fwd_simple_forward(pkts_burst[j], portid,
+ qconf);
+ }
+
+ /* Forward remaining prefetched packets */
+ for (; j < nb_rx; j++) {
+ l3fwd_simple_forward(pkts_burst[j], portid,
+ qconf);
+ }
+ }
+ if (unlikely(poll_count >= DEFAULT_COUNT)) {
+ diff_tsc = cur_tsc - prev_tel_tsc;
+ if (diff_tsc >= MAX_CYCLES) {
+ br = FULL;
+ } else if (diff_tsc > MIN_CYCLES &&
+ diff_tsc < MAX_CYCLES) {
+ br = (diff_tsc * 100) / MAX_CYCLES;
+ } else {
+ br = ZERO;
+ }
+ poll_count = 0;
+ prev_tel_tsc = cur_tsc;
+ /* update stats for telemetry */
+ rte_spinlock_lock(&stats[lcore_id].telemetry_lock);
+ stats[lcore_id].ep_nep[0] = ep_nep[0];
+ stats[lcore_id].ep_nep[1] = ep_nep[1];
+ stats[lcore_id].fp_nfp[0] = fp_nfp[0];
+ stats[lcore_id].fp_nfp[1] = fp_nfp[1];
+ stats[lcore_id].br = br;
+ rte_spinlock_unlock(&stats[lcore_id].telemetry_lock);
+ }
+ }
+
+ return 0;
+}
+ /*
+ * Main processing loop for the empty-poll power mode.  Same packet path
+ * as the other l3fwd-power loops, but every RX poll result is reported
+ * to the rte_power empty-poll library (empty vs. non-empty poll), which
+ * drives per-lcore frequency decisions from those statistics.
+ * Runs on its lcore until is_done() becomes true; always returns 0.
+ */
+ static int
+ main_empty_poll_loop(__rte_unused void *dummy)
+ {
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ unsigned int lcore_id;
+ uint64_t prev_tsc, diff_tsc, cur_tsc;
+ int i, j, nb_rx;
+ uint8_t queueid;
+ uint16_t portid;
+ struct lcore_conf *qconf;
+ struct lcore_rx_queue *rx_queue;
+
+ /* TX drain period in TSC cycles: BURST_TX_DRAIN_US, rounded up */
+ const uint64_t drain_tsc =
+ (rte_get_tsc_hz() + US_PER_S - 1) /
+ US_PER_S * BURST_TX_DRAIN_US;
+
+ prev_tsc = 0;
+
+ lcore_id = rte_lcore_id();
+ qconf = &lcore_conf[lcore_id];
+
+ /* No RX queues mapped to this lcore: nothing to poll, exit at once */
+ if (qconf->n_rx_queue == 0) {
+ RTE_LOG(INFO, L3FWD_POWER, "lcore %u has nothing to do\n",
+ lcore_id);
+ return 0;
+ }
+
+ /* Log the (port, queue) pairs this lcore will service */
+ for (i = 0; i < qconf->n_rx_queue; i++) {
+ portid = qconf->rx_queue_list[i].port_id;
+ queueid = qconf->rx_queue_list[i].queue_id;
+ RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%u "
+ "rxqueueid=%hhu\n", lcore_id, portid, queueid);
+ }
+
+ while (!is_done()) {
+ stats[lcore_id].nb_iteration_looped++;
+
+ cur_tsc = rte_rdtsc();
+ /*
+ * TX burst queue drain
+ */
+ diff_tsc = cur_tsc - prev_tsc;
+ if (unlikely(diff_tsc > drain_tsc)) {
+ for (i = 0; i < qconf->n_tx_port; ++i) {
+ portid = qconf->tx_port_id[i];
+ rte_eth_tx_buffer_flush(portid,
+ qconf->tx_queue_id[portid],
+ qconf->tx_buffer[portid]);
+ }
+ prev_tsc = cur_tsc;
+ }
+
+ /*
+ * Read packet from RX queues
+ */
+ for (i = 0; i < qconf->n_rx_queue; ++i) {
+ rx_queue = &(qconf->rx_queue_list[i]);
+ /* idle_hint is cleared here but never read in this
+ * loop; it belongs to the legacy-mode heuristics */
+ rx_queue->idle_hint = 0;
+ portid = rx_queue->port_id;
+ queueid = rx_queue->queue_id;
+
+ nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst,
+ MAX_PKT_BURST);
+
+ stats[lcore_id].nb_rx_processed += nb_rx;
+
+ /* Feed this poll's outcome to the empty-poll state
+ * machine: empty polls and busy polls are accounted
+ * separately */
+ if (nb_rx == 0) {
+
+ rte_power_empty_poll_stat_update(lcore_id);
+
+ continue;
+ } else {
+ rte_power_poll_stat_update(lcore_id, nb_rx);
+ }
+
+
+ /* Prefetch first packets */
+ for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
+ rte_prefetch0(rte_pktmbuf_mtod(
+ pkts_burst[j], void *));
+ }
+
+ /* Prefetch and forward already prefetched packets */
+ for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
+ rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
+ j + PREFETCH_OFFSET],
+ void *));
+ l3fwd_simple_forward(pkts_burst[j], portid,
+ qconf);
+ }
+
+ /* Forward remaining prefetched packets */
+ for (; j < nb_rx; j++) {
+ l3fwd_simple_forward(pkts_burst[j], portid,
+ qconf);
+ }
+
+ }
+
+ }
+
+ return 0;
+ }
+ /*
+ * Main processing loop for the legacy power mode.  Forwards traffic and
+ * manages per-lcore power: while queues see packets it applies the
+ * strongest frequency scale-up hint collected across the queues; once
+ * every queue has been idle for consecutive polls it either does a short
+ * delay or, when RX interrupts are available, blocks until a packet
+ * arrives.  Runs until is_done(); always returns 0.
+ */
+ static int
+ main_legacy_loop(__rte_unused void *dummy)
+ {
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ unsigned lcore_id;
+ uint64_t prev_tsc, diff_tsc, cur_tsc, tim_res_tsc, hz;
+ uint64_t prev_tsc_power = 0, cur_tsc_power, diff_tsc_power;
+ int i, j, nb_rx;
+ uint8_t queueid;
+ uint16_t portid;
+ struct lcore_conf *qconf;
+ struct lcore_rx_queue *rx_queue;
+ enum freq_scale_hint_t lcore_scaleup_hint;
+ uint32_t lcore_rx_idle_count = 0;
+ uint32_t lcore_idle_hint = 0;
+ int intr_en = 0;
+
+ /* TX drain period in TSC cycles: BURST_TX_DRAIN_US, rounded up */
+ const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
+
+ prev_tsc = 0;
+ /* interval between rte_timer_manage() calls */
+ hz = rte_get_timer_hz();
+ tim_res_tsc = hz/TIMER_NUMBER_PER_SECOND;
+
+ lcore_id = rte_lcore_id();
+ qconf = &lcore_conf[lcore_id];
+
+ /* No RX queues mapped to this lcore: nothing to do */
+ if (qconf->n_rx_queue == 0) {
+ RTE_LOG(INFO, L3FWD_POWER, "lcore %u has nothing to do\n", lcore_id);
+ return 0;
+ }
+
+ RTE_LOG(INFO, L3FWD_POWER, "entering main loop on lcore %u\n", lcore_id);
+
+ /* Log the (port, queue) pairs this lcore will service */
+ for (i = 0; i < qconf->n_rx_queue; i++) {
+ portid = qconf->rx_queue_list[i].port_id;
+ queueid = qconf->rx_queue_list[i].queue_id;
+ RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%u "
+ "rxqueueid=%hhu\n", lcore_id, portid, queueid);
+ }
+
+ /* add into event wait list; interrupt-based sleep is only used
+ * below if registration succeeds */
+ if (event_register(qconf) == 0)
+ intr_en = 1;
+ else
+ RTE_LOG(INFO, L3FWD_POWER, "RX interrupt won't enable.\n");
+
+ while (!is_done()) {
+ stats[lcore_id].nb_iteration_looped++;
+
+ cur_tsc = rte_rdtsc();
+ cur_tsc_power = cur_tsc;
+
+ /*
+ * TX burst queue drain
+ */
+ diff_tsc = cur_tsc - prev_tsc;
+ if (unlikely(diff_tsc > drain_tsc)) {
+ for (i = 0; i < qconf->n_tx_port; ++i) {
+ portid = qconf->tx_port_id[i];
+ rte_eth_tx_buffer_flush(portid,
+ qconf->tx_queue_id[portid],
+ qconf->tx_buffer[portid]);
+ }
+ prev_tsc = cur_tsc;
+ }
+
+ /* Run expired timer callbacks, at most
+ * TIMER_NUMBER_PER_SECOND times per second */
+ diff_tsc_power = cur_tsc_power - prev_tsc_power;
+ if (diff_tsc_power > tim_res_tsc) {
+ rte_timer_manage();
+ prev_tsc_power = cur_tsc_power;
+ }
+
+ /* re-entered directly after an RX-interrupt wakeup, skipping
+ * the TX-drain/timer work above */
+start_rx:
+ /*
+ * Read packet from RX queues
+ */
+ lcore_scaleup_hint = FREQ_CURRENT;
+ lcore_rx_idle_count = 0;
+ for (i = 0; i < qconf->n_rx_queue; ++i) {
+ rx_queue = &(qconf->rx_queue_list[i]);
+ rx_queue->idle_hint = 0;
+ portid = rx_queue->port_id;
+ queueid = rx_queue->queue_id;
+
+ nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst,
+ MAX_PKT_BURST);
+
+ stats[lcore_id].nb_rx_processed += nb_rx;
+ if (unlikely(nb_rx == 0)) {
+ /**
+ * no packet received from rx queue, try to
+ * sleep for a while forcing CPU enter deeper
+ * C states.
+ */
+ rx_queue->zero_rx_packet_count++;
+
+ /* tolerate up to MIN_ZERO_POLL_COUNT empty
+ * polls before treating the queue as idle */
+ if (rx_queue->zero_rx_packet_count <=
+ MIN_ZERO_POLL_COUNT)
+ continue;
+
+ rx_queue->idle_hint = power_idle_heuristic(\
+ rx_queue->zero_rx_packet_count);
+ lcore_rx_idle_count++;
+ } else {
+ rx_queue->zero_rx_packet_count = 0;
+
+ /**
+ * do not scale up frequency immediately as
+ * user to kernel space communication is costly
+ * which might impact packet I/O for received
+ * packets.
+ */
+ rx_queue->freq_up_hint =
+ power_freq_scaleup_heuristic(lcore_id,
+ portid, queueid);
+ }
+
+ /* Prefetch first packets */
+ for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
+ rte_prefetch0(rte_pktmbuf_mtod(
+ pkts_burst[j], void *));
+ }
+
+ /* Prefetch and forward already prefetched packets */
+ for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
+ rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
+ j + PREFETCH_OFFSET], void *));
+ l3fwd_simple_forward(pkts_burst[j], portid,
+ qconf);
+ }
+
+ /* Forward remaining prefetched packets */
+ for (; j < nb_rx; j++) {
+ l3fwd_simple_forward(pkts_burst[j], portid,
+ qconf);
+ }
+ }
+
+ /* At least one queue was not idle: pick the strongest
+ * scale-up hint across all queues and apply it */
+ if (likely(lcore_rx_idle_count != qconf->n_rx_queue)) {
+ for (i = 1, lcore_scaleup_hint =
+ qconf->rx_queue_list[0].freq_up_hint;
+ i < qconf->n_rx_queue; ++i) {
+ rx_queue = &(qconf->rx_queue_list[i]);
+ if (rx_queue->freq_up_hint >
+ lcore_scaleup_hint)
+ lcore_scaleup_hint =
+ rx_queue->freq_up_hint;
+ }
+
+ if (lcore_scaleup_hint == FREQ_HIGHEST) {
+ if (rte_power_freq_max)
+ rte_power_freq_max(lcore_id);
+ } else if (lcore_scaleup_hint == FREQ_HIGHER) {
+ if (rte_power_freq_up)
+ rte_power_freq_up(lcore_id);
+ }
+ } else {
+ /**
+ * All Rx queues empty in recent consecutive polls,
+ * sleep in a conservative manner, meaning sleep as
+ * less as possible.
+ */
+ for (i = 1, lcore_idle_hint =
+ qconf->rx_queue_list[0].idle_hint;
+ i < qconf->n_rx_queue; ++i) {
+ rx_queue = &(qconf->rx_queue_list[i]);
+ if (rx_queue->idle_hint < lcore_idle_hint)
+ lcore_idle_hint = rx_queue->idle_hint;
+ }
+
+ if (lcore_idle_hint < SUSPEND_THRESHOLD)
+ /**
+ * execute "pause" instruction to avoid context
+ * switch which generally take hundred of
+ * microseconds for short sleep.
+ */
+ rte_delay_us(lcore_idle_hint);
+ else {
+ /* suspend until rx interrupt triggers */
+ if (intr_en) {
+ turn_on_off_intr(qconf, 1);
+ sleep_until_rx_interrupt(
+ qconf->n_rx_queue);
+ turn_on_off_intr(qconf, 0);
+ /**
+ * start receiving packets immediately
+ */
+ if (likely(!is_done()))
+ goto start_rx;
+ }
+ }
+ stats[lcore_id].sleep_time += lcore_idle_hint;
+ }
+ }
+
+ return 0;
+ }
+
+ /*
+ * Validate every (port, queue, lcore) entry in lcore_params: queue id in
+ * range, lcore enabled in the EAL core mask, not the master core when in
+ * telemetry mode.  Also warns when an lcore sits on a non-zero NUMA
+ * socket while NUMA awareness is disabled.
+ * Returns 0 when all entries are valid, -1 on the first invalid one.
+ */
+ static int
+ check_lcore_params(void)
+ {
+ uint8_t queue, lcore;
+ uint16_t i;
+ int socketid;
+
+ for (i = 0; i < nb_lcore_params; ++i) {
+ queue = lcore_params[i].queue_id;
+ if (queue >= MAX_RX_QUEUE_PER_PORT) {
+ printf("invalid queue number: %hhu\n", queue);
+ return -1;
+ }
+ lcore = lcore_params[i].lcore_id;
+ if (!rte_lcore_is_enabled(lcore)) {
+ printf("error: lcore %hhu is not enabled in lcore "
+ "mask\n", lcore);
+ return -1;
+ }
+ /*
+ * Fix: the original wrote
+ *   (socketid = rte_lcore_to_socket_id(lcore) != 0)
+ * where `!=` binds tighter than `=`, so socketid received
+ * the comparison result (0/1) instead of the socket id and
+ * the warning printed a bogus socket number.  Assign first,
+ * then compare.
+ */
+ socketid = rte_lcore_to_socket_id(lcore);
+ if (socketid != 0 && numa_on == 0) {
+ printf("warning: lcore %hhu is on socket %d with numa "
+ "off\n", lcore, socketid);
+ }
+ if (app_mode == APP_MODE_TELEMETRY && lcore == rte_lcore_id()) {
+ printf("cannot enable master core %d in config for telemetry mode\n",
+ rte_lcore_id());
+ return -1;
+ }
+ }
+ return 0;
+ }
+
+ /*
+ * Verify that every port referenced by lcore_params is both enabled in
+ * the user-supplied port mask and actually present on the board.
+ * Returns 0 when all referenced ports pass, -1 on the first bad one.
+ */
+ static int
+ check_port_config(void)
+ {
+ uint16_t idx;
+ unsigned port;
+
+ for (idx = 0; idx < nb_lcore_params; ++idx) {
+ port = lcore_params[idx].port_id;
+ if (!(enabled_port_mask & (1 << port))) {
+ printf("port %u is not enabled in port mask\n",
+ port);
+ return -1;
+ }
+ if (!rte_eth_dev_is_valid_port(port)) {
+ printf("port %u is not present on the board\n",
+ port);
+ return -1;
+ }
+ }
+ return 0;
+ }
+
+ /*
+ * Return the number of RX queues configured for @port: one past the
+ * largest queue id that appears for that port in lcore_params, or 0
+ * when the port is not referenced at all.
+ */
+ static uint8_t
+ get_port_n_rx_queues(const uint16_t port)
+ {
+ int max_queue = -1;
+ uint16_t idx;
+
+ for (idx = 0; idx < nb_lcore_params; ++idx) {
+ if (lcore_params[idx].port_id != port)
+ continue;
+ if (lcore_params[idx].queue_id > max_queue)
+ max_queue = lcore_params[idx].queue_id;
+ }
+ return (uint8_t)(max_queue + 1);
+ }
+
+ /*
+ * Distribute the (port, queue) pairs from lcore_params into each
+ * lcore's rx_queue_list.  Returns -1 (with an error message) if any
+ * lcore would exceed MAX_RX_QUEUE_PER_LCORE RX queues, 0 on success.
+ */
+ static int
+ init_lcore_rx_queues(void)
+ {
+ uint16_t idx;
+ uint16_t slot;
+ uint8_t lcore;
+
+ for (idx = 0; idx < nb_lcore_params; ++idx) {
+ lcore = lcore_params[idx].lcore_id;
+ /* next free slot in this lcore's queue list */
+ slot = lcore_conf[lcore].n_rx_queue;
+ if (slot >= MAX_RX_QUEUE_PER_LCORE) {
+ printf("error: too many queues (%u) for lcore: %u\n",
+ (unsigned)slot + 1, (unsigned)lcore);
+ return -1;
+ }
+ lcore_conf[lcore].rx_queue_list[slot].port_id =
+ lcore_params[idx].port_id;
+ lcore_conf[lcore].rx_queue_list[slot].queue_id =
+ lcore_params[idx].queue_id;
+ lcore_conf[lcore].n_rx_queue++;
+ }
+ return 0;
+ }
+
+ /* display usage: print the command-line synopsis and a one-line
+  * description of every supported application option to stdout.
+  * prgname: program name (argv[0]), echoed at the head of the synopsis. */
+ static void
+ print_usage(const char *prgname)
+ {
+ printf ("%s [EAL options] -- -p PORTMASK -P"
+ " [--config (port,queue,lcore)[,(port,queue,lcore]]"
+ " [--high-perf-cores CORELIST"
+ " [--perf-config (port,queue,hi_perf,lcore_index)[,(port,queue,hi_perf,lcore_index]]"
+ " [--enable-jumbo [--max-pkt-len PKTLEN]]\n"
+ " -p PORTMASK: hexadecimal bitmask of ports to configure\n"
+ " -P : enable promiscuous mode\n"
+ " --config (port,queue,lcore): rx queues configuration\n"
+ " --high-perf-cores CORELIST: list of high performance cores\n"
+ " --perf-config: similar as config, cores specified as indices"
+ " for bins containing high or regular performance cores\n"
+ " --no-numa: optional, disable numa awareness\n"
+ " --enable-jumbo: enable jumbo frame"
+ " which max packet len is PKTLEN in decimal (64-9600)\n"
+ " --parse-ptype: parse packet type by software\n"
+ " --legacy: use legacy interrupt-based scaling\n"
+ " --empty-poll: enable empty poll detection"
+ " follow (training_flag, high_threshold, med_threshold)\n"
+ " --telemetry: enable telemetry mode, to update"
+ " empty polls, full polls, and core busyness to telemetry\n"
+ " --interrupt-only: enable interrupt-only mode\n",
+ prgname);
+ }
+
+static int parse_max_pkt_len(const char *pktlen)
+{
+ char *end = NULL;
+ unsigned long len;
+
+ /* parse decimal string */
+ len = strtoul(pktlen, &end, 10);
+ if ((pktlen[0] == '\0') || (end == NULL) || (*end != '\0'))
+ return -1;