/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#include <string.h>
#include <inttypes.h>

#include <rte_lcore.h>
#include <rte_cycles.h>
#include <rte_atomic.h>
#include <rte_malloc.h>

#include "rte_power.h"
#include "rte_power_empty_poll.h"
#define INTERVALS_PER_SECOND 100 /* (10ms) */
#define SECONDS_TO_TRAIN_FOR 2
#define DEFAULT_MED_TO_HIGH_PERCENT_THRESHOLD 70
#define DEFAULT_HIGH_TO_MED_PERCENT_THRESHOLD 30
#define DEFAULT_CYCLES_PER_PACKET 800
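
/*
 * With INTERVALS_PER_SECOND at 100 (one 10ms interval) and
 * SECONDS_TO_TRAIN_FOR at 2, each frequency level is trained over
 * 100 * 2 = 200 iterations (see rte_power_empty_poll_stat_init()).
 */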
static struct ep_params *ep_params;
static uint32_t med_to_high_threshold = DEFAULT_MED_TO_HIGH_PERCENT_THRESHOLD;
static uint32_t high_to_med_threshold = DEFAULT_HIGH_TO_MED_PERCENT_THRESHOLD;

static uint32_t avail_freqs[RTE_MAX_LCORE][NUM_FREQS];

static uint32_t total_avail_freqs[RTE_MAX_LCORE];

static uint32_t freq_index[NUM_FREQ];
static uint32_t
get_freq_index(enum freq_val index)
{
	return freq_index[index];
}
static int
set_power_freq(int lcore_id, enum freq_val freq, bool specific_freq)
{
	int err = 0;
	uint32_t power_freq_index;

	if (!specific_freq)
		power_freq_index = get_freq_index(freq);
	else
		power_freq_index = freq;

	err = rte_power_set_freq(lcore_id, power_freq_index);

	return err;
}
static __rte_always_inline void
exit_training_state(struct priority_worker *poll_stats)
{
	RTE_SET_USED(poll_stats);
}
static __rte_always_inline void
enter_training_state(struct priority_worker *poll_stats)
{
	poll_stats->iter_counter = 0;
	poll_stats->cur_freq = LOW;
	poll_stats->queue_state = TRAINING;
}
static __rte_always_inline void
enter_normal_state(struct priority_worker *poll_stats)
{
	/* Clear the averages arrays and counters */
	memset(poll_stats->edpi_av, 0, sizeof(poll_stats->edpi_av));
	poll_stats->ec = 0;
	memset(poll_stats->ppi_av, 0, sizeof(poll_stats->ppi_av));
	poll_stats->pc = 0;

	poll_stats->cur_freq = MED;
	poll_stats->iter_counter = 0;
	poll_stats->threshold_ctr = 0;
	poll_stats->queue_state = MED_NORMAL;
	RTE_LOG(INFO, POWER, "Set the power freq to MED\n");
	set_power_freq(poll_stats->lcore_id, MED, false);

	poll_stats->thresh[MED].threshold_percent = med_to_high_threshold;
	poll_stats->thresh[HGH].threshold_percent = high_to_med_threshold;
}
static __rte_always_inline void
enter_busy_state(struct priority_worker *poll_stats)
{
	/* Clear the averages arrays and counters */
	memset(poll_stats->edpi_av, 0, sizeof(poll_stats->edpi_av));
	poll_stats->ec = 0;
	memset(poll_stats->ppi_av, 0, sizeof(poll_stats->ppi_av));
	poll_stats->pc = 0;

	poll_stats->cur_freq = HGH;
	poll_stats->iter_counter = 0;
	poll_stats->threshold_ctr = 0;
	poll_stats->queue_state = HGH_BUSY;
	set_power_freq(poll_stats->lcore_id, HGH, false);
}
static __rte_always_inline void
enter_purge_state(struct priority_worker *poll_stats)
{
	poll_stats->iter_counter = 0;
	poll_stats->queue_state = LOW_PURGE;
}
static __rte_always_inline void
set_state(struct priority_worker *poll_stats,
		enum queue_state new_state)
{
	enum queue_state old_state = poll_stats->queue_state;

	if (old_state != new_state) {
		/* Call any old state exit functions */
		if (old_state == TRAINING)
			exit_training_state(poll_stats);

		/* Call any new state entry functions */
		if (new_state == TRAINING)
			enter_training_state(poll_stats);
		if (new_state == MED_NORMAL)
			enter_normal_state(poll_stats);
		if (new_state == HGH_BUSY)
			enter_busy_state(poll_stats);
		if (new_state == LOW_PURGE)
			enter_purge_state(poll_stats);
	}
}
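
/*
 * Overview of the queue_state machine driven by set_state():
 *
 *   TRAINING   -> MED_NORMAL  once all three base_edpi values are trained
 *   MED_NORMAL -> HGH_BUSY    when busyness stays above the MED threshold
 *   HGH_BUSY   -> MED_NORMAL  when busyness drops below the HGH threshold
 *
 * LOW_PURGE is only entered via an explicit policy; purge mode is not
 * currently supported (see update_stats_normal()).
 */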
static __rte_always_inline void
set_policy(struct priority_worker *poll_stats,
		struct ep_policy *policy)
{
	set_state(poll_stats, policy->state);

	/* A TRAINING policy measures its own thresholds */
	if (policy->state == TRAINING)
		return;

	poll_stats->thresh[MED_NORMAL].base_edpi = policy->med_base_edpi;
	poll_stats->thresh[HGH_BUSY].base_edpi = policy->hgh_base_edpi;

	poll_stats->thresh[MED_NORMAL].trained = true;
	poll_stats->thresh[HGH_BUSY].trained = true;
}
static void
update_training_stats(struct priority_worker *poll_stats,
		uint32_t freq,
		bool specific_freq,
		uint32_t max_train_iter)
{
	RTE_SET_USED(specific_freq);

	uint64_t p0_empty_deq;

	if (poll_stats->cur_freq == freq &&
			poll_stats->thresh[freq].trained == false) {
		if (poll_stats->thresh[freq].cur_train_iter == 0) {

			set_power_freq(poll_stats->lcore_id,
					freq, specific_freq);

			poll_stats->empty_dequeues_prev =
					poll_stats->empty_dequeues;

			poll_stats->thresh[freq].cur_train_iter++;

			return;
		} else if (poll_stats->thresh[freq].cur_train_iter
				<= max_train_iter) {

			p0_empty_deq = poll_stats->empty_dequeues -
					poll_stats->empty_dequeues_prev;

			poll_stats->empty_dequeues_prev =
					poll_stats->empty_dequeues;

			poll_stats->thresh[freq].base_edpi += p0_empty_deq;
			poll_stats->thresh[freq].cur_train_iter++;

		} else {
			if (poll_stats->thresh[freq].trained == false) {
				/* Average the accumulated count over the
				 * training iterations.
				 */
				poll_stats->thresh[freq].base_edpi =
					poll_stats->thresh[freq].base_edpi /
					max_train_iter;

				/* Add on a factor of 0.05%; this should
				 * remove any false negatives when the
				 * system is 0% busy.
				 */
				poll_stats->thresh[freq].base_edpi +=
					poll_stats->thresh[freq].base_edpi /
					2000;

				poll_stats->thresh[freq].trained = true;
				poll_stats->cur_freq++;
			}
		}
	}
}
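
/*
 * Worked example with illustrative numbers: if a core accumulates
 * 2,000,000 empty dequeues over max_train_iter = 200 iterations, the
 * averaged base_edpi is 10,000 per interval, and the 0.05% margin adds
 * 10,000 / 2000 = 5, giving a threshold of 10,005. Note the margin uses
 * integer division, so it rounds down to zero when base_edpi < 2000.
 */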
static __rte_always_inline uint32_t
update_stats(struct priority_worker *poll_stats)
{
	uint64_t tot_edpi = 0;
	uint32_t j, percent;

	struct priority_worker *s = poll_stats;

	uint64_t cur_edpi = s->empty_dequeues - s->empty_dequeues_prev;

	s->empty_dequeues_prev = s->empty_dequeues;

	uint64_t ppi = s->num_dequeue_pkts - s->num_dequeue_pkts_prev;

	s->num_dequeue_pkts_prev = s->num_dequeue_pkts;

	if (s->thresh[s->cur_freq].base_edpi < cur_edpi) {
		/* edpi means empty poll counter difference per interval */
		RTE_LOG(DEBUG, POWER, "cur_edpi is too large "
				"cur edpi %"PRIu64" "
				"base edpi %"PRIu64"\n",
				cur_edpi,
				s->thresh[s->cur_freq].base_edpi);
		/* Out-of-range value makes the caller treat this sample
		 * as invalid; see update_stats_normal().
		 */
		return 1000UL;
	}

	s->edpi_av[s->ec++ % BINS_AV] = cur_edpi;
	s->ppi_av[s->pc++ % BINS_AV] = ppi;

	for (j = 0; j < BINS_AV; j++) {
		tot_edpi += s->edpi_av[j];
	}

	tot_edpi = tot_edpi / BINS_AV;

	percent = 100 - (uint32_t)(((float)tot_edpi /
			(float)s->thresh[s->cur_freq].base_edpi) * 100);

	return (uint32_t)percent;
}
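
/*
 * Worked example with illustrative numbers: with base_edpi = 10,000 and
 * a BINS_AV-window average of 3,000 empty polls per interval, the core
 * is (3,000 / 10,000) * 100 = 30% idle, i.e. 100 - 30 = 70% busy, so
 * update_stats() returns 70.
 */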
static __rte_always_inline void
update_stats_normal(struct priority_worker *poll_stats)
{
	uint32_t percent;

	if (poll_stats->thresh[poll_stats->cur_freq].base_edpi == 0) {

		enum freq_val cur_freq = poll_stats->cur_freq;

		/* edpi means empty poll counter difference per interval */
		RTE_LOG(DEBUG, POWER, "cur freq is %d, edpi is %"PRIu64"\n",
				cur_freq,
				poll_stats->thresh[cur_freq].base_edpi);
		return;
	}

	percent = update_stats(poll_stats);

	if (percent > 100) {
		/* edpi means empty poll counter difference per interval */
		RTE_LOG(DEBUG, POWER, "Edpi is bigger than threshold\n");
		return;
	}

	if (poll_stats->cur_freq == LOW)
		RTE_LOG(INFO, POWER,
				"Purge Mode is not currently supported\n");
	else if (poll_stats->cur_freq == MED) {

		/* threshold_ctr adds hysteresis: busyness must stay past
		 * the threshold for INTERVALS_PER_SECOND consecutive
		 * samples (about one second) before switching state.
		 */
		if (percent >
				poll_stats->thresh[MED].threshold_percent) {

			if (poll_stats->threshold_ctr < INTERVALS_PER_SECOND)
				poll_stats->threshold_ctr++;
			else {
				set_state(poll_stats, HGH_BUSY);
				RTE_LOG(INFO, POWER, "MOVE to HGH\n");
			}

		} else {
			/* reset */
			poll_stats->threshold_ctr = 0;
		}

	} else if (poll_stats->cur_freq == HGH) {

		if (percent <
				poll_stats->thresh[HGH].threshold_percent) {

			if (poll_stats->threshold_ctr < INTERVALS_PER_SECOND)
				poll_stats->threshold_ctr++;
			else {
				set_state(poll_stats, MED_NORMAL);
				RTE_LOG(INFO, POWER, "MOVE to MED\n");
			}
		} else {
			/* reset */
			poll_stats->threshold_ctr = 0;
		}
	}
}
static int
empty_poll_training(struct priority_worker *poll_stats,
		uint32_t max_train_iter)
{
	/* Let the workload settle for one second before training */
	if (poll_stats->iter_counter < INTERVALS_PER_SECOND) {
		poll_stats->iter_counter++;
		return 0;
	}

	update_training_stats(poll_stats,
			LOW,
			false,
			max_train_iter);

	update_training_stats(poll_stats,
			MED,
			false,
			max_train_iter);

	update_training_stats(poll_stats,
			HGH,
			false,
			max_train_iter);

	if (poll_stats->thresh[LOW].trained == true
			&& poll_stats->thresh[MED].trained == true
			&& poll_stats->thresh[HGH].trained == true) {

		set_state(poll_stats, MED_NORMAL);

		RTE_LOG(INFO, POWER, "LOW threshold is %"PRIu64"\n",
				poll_stats->thresh[LOW].base_edpi);

		RTE_LOG(INFO, POWER, "MED threshold is %"PRIu64"\n",
				poll_stats->thresh[MED].base_edpi);

		RTE_LOG(INFO, POWER, "HIGH threshold is %"PRIu64"\n",
				poll_stats->thresh[HGH].base_edpi);

		RTE_LOG(INFO, POWER, "Training is Complete for %d\n",
				poll_stats->lcore_id);
	}

	return 0;
}
void
rte_empty_poll_detection(struct rte_timer *tim, void *arg)
{
	uint32_t i;

	struct priority_worker *poll_stats;

	RTE_SET_USED(tim);
	RTE_SET_USED(arg);

	for (i = 0; i < NUM_NODES; i++) {

		poll_stats = &(ep_params->wrk_data.wrk_stats[i]);

		if (rte_lcore_is_enabled(poll_stats->lcore_id) == 0)
			continue;

		switch (poll_stats->queue_state) {
		case(TRAINING):
			empty_poll_training(poll_stats,
					ep_params->max_train_iter);
			break;

		case(HGH_BUSY):
		case(MED_NORMAL):
			update_stats_normal(poll_stats);
			break;

		case(LOW_PURGE):
			break;
		default:
			break;
		}
	}
}
int
rte_power_empty_poll_stat_init(struct ep_params **eptr, uint8_t *freq_tlb,
		struct ep_policy *policy)
{
	uint32_t i;

	/* Allocate the ep_params structure */
	ep_params = rte_zmalloc_socket(NULL,
			sizeof(struct ep_params),
			0,
			rte_socket_id());
	if (ep_params == NULL)
		return -1;

	if (freq_tlb == NULL) {
		freq_index[LOW] = 14;
		freq_index[MED] = 9;
		freq_index[HGH] = 1;
	} else {
		freq_index[LOW] = freq_tlb[LOW];
		freq_index[MED] = freq_tlb[MED];
		freq_index[HGH] = freq_tlb[HGH];
	}

	RTE_LOG(INFO, POWER, "Initialize the Empty Poll\n");

	/* Train for pre-defined period */
	ep_params->max_train_iter = INTERVALS_PER_SECOND * SECONDS_TO_TRAIN_FOR;

	struct stats_data *w = &ep_params->wrk_data;

	*eptr = ep_params;

	/* initialize all wrk_stats state */
	for (i = 0; i < NUM_NODES; i++) {

		if (rte_lcore_is_enabled(i) == 0)
			continue;
		/* init the freqs table */
		total_avail_freqs[i] = rte_power_freqs(i,
				avail_freqs[i],
				NUM_FREQS);

		RTE_LOG(INFO, POWER, "total avail freq is %d , lcoreid %d\n",
				total_avail_freqs[i],
				i);

		if (get_freq_index(LOW) > total_avail_freqs[i])
			return -1;

		if (rte_get_main_lcore() != i) {
			w->wrk_stats[i].lcore_id = i;
			set_policy(&w->wrk_stats[i], policy);
		}
	}

	return 0;
}
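
/*
 * Usage sketch (illustrative assumption, not part of this library): an
 * application initializes the detector once, then runs the detection
 * callback periodically, for example from an rte_timer on the main lcore:
 *
 *	struct ep_policy policy = { .state = TRAINING };
 *	struct ep_params *params;
 *	static struct rte_timer ep_timer;
 *
 *	if (rte_power_empty_poll_stat_init(&params, NULL, &policy) < 0)
 *		rte_exit(EXIT_FAILURE, "empty poll init failed\n");
 *
 *	rte_timer_init(&ep_timer);
 *	rte_timer_reset(&ep_timer,
 *			rte_get_timer_hz() / INTERVALS_PER_SECOND,
 *			PERIODICAL, rte_get_main_lcore(),
 *			rte_empty_poll_detection, NULL);
 */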
void
rte_power_empty_poll_stat_free(void)
{
	RTE_LOG(INFO, POWER, "Close the Empty Poll\n");

	if (ep_params != NULL)
		rte_free(ep_params);
}
int
rte_power_empty_poll_stat_update(unsigned int lcore_id)
{
	struct priority_worker *poll_stats;

	if (lcore_id >= NUM_NODES)
		return -1;

	poll_stats = &(ep_params->wrk_data.wrk_stats[lcore_id]);

	if (poll_stats->lcore_id == 0)
		poll_stats->lcore_id = lcore_id;

	poll_stats->empty_dequeues++;

	return 0;
}
int
rte_power_poll_stat_update(unsigned int lcore_id, uint8_t nb_pkt)
{
	struct priority_worker *poll_stats;

	if (lcore_id >= NUM_NODES)
		return -1;

	poll_stats = &(ep_params->wrk_data.wrk_stats[lcore_id]);

	if (poll_stats->lcore_id == 0)
		poll_stats->lcore_id = lcore_id;

	poll_stats->num_dequeue_pkts += nb_pkt;

	return 0;
}
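
/*
 * Datapath sketch (illustrative assumption, not part of this library):
 * a worker's RX loop feeds both counters so the detector can compute
 * empty polls and packets per interval:
 *
 *	nb_rx = rte_eth_rx_burst(port, queue, pkts, MAX_PKT_BURST);
 *	if (nb_rx == 0)
 *		rte_power_empty_poll_stat_update(rte_lcore_id());
 *	else
 *		rte_power_poll_stat_update(rte_lcore_id(), nb_rx);
 */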
uint64_t
rte_power_empty_poll_stat_fetch(unsigned int lcore_id)
{
	struct priority_worker *poll_stats;

	if (lcore_id >= NUM_NODES)
		return -1;

	poll_stats = &(ep_params->wrk_data.wrk_stats[lcore_id]);

	if (poll_stats->lcore_id == 0)
		poll_stats->lcore_id = lcore_id;

	return poll_stats->empty_dequeues;
}
uint64_t
rte_power_poll_stat_fetch(unsigned int lcore_id)
{
	struct priority_worker *poll_stats;

	if (lcore_id >= NUM_NODES)
		return -1;

	poll_stats = &(ep_params->wrk_data.wrk_stats[lcore_id]);

	if (poll_stats->lcore_id == 0)
		poll_stats->lcore_id = lcore_id;

	return poll_stats->num_dequeue_pkts;
}