/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <sys/types.h>
#include <sys/epoll.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/select.h>

#pragma message "Jansson dev libs unavailable, not including JSON parsing"

#include <rte_string_fns.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_pmd_i40e.h>
#include <rte_power.h>

#include <libvirt/libvirt.h>

#include "channel_monitor.h"
#include "channel_manager.h"
#include "power_manager.h"
#include "oob_monitor.h"
#define RTE_LOGTYPE_CHANNEL_MONITOR RTE_LOGTYPE_USER1

#define MAX_EVENTS 256

uint64_t vsi_pkt_count_prev[384];
uint64_t rdtsc_prev[384];
#define MAX_JSON_STRING_LEN 1024
char json_data[MAX_JSON_STRING_LEN];

double time_period_ms = 1;
static volatile unsigned run_loop = 1;
static int global_event_fd;
static unsigned int policy_is_set;
static struct epoll_event *global_events_list;
static struct policy policies[RTE_MAX_LCORE];
	struct rte_ether_addr addr;
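/*
 * Parse a MAC address string into *ether_addr. Both the
 * XX:XX:XX:XX:XX:XX and XXXX:XXXX:XXXX forms handled below are
 * accepted; anything else is rejected.
 */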
str_to_ether_addr(const char *a, struct rte_ether_addr *ether_addr)
	unsigned long o[RTE_ETHER_ADDR_LEN];

		o[i] = strtoul(a, &end, 16);
		if (errno != 0 || end == a || (end[0] != ':' && end[0] != 0))
	} while (++i != RTE_DIM(o) && end[0] != 0);
	/* Junk at the end of line */

	/* Support the format XX:XX:XX:XX:XX:XX */
	if (i == RTE_ETHER_ADDR_LEN) {
			ether_addr->addr_bytes[i] = (uint8_t)o[i];
	/* Support the format XXXX:XXXX:XXXX */
	} else if (i == RTE_ETHER_ADDR_LEN / 2) {
			if (o[i] > UINT16_MAX)
			ether_addr->addr_bytes[i * 2] =
				(uint8_t)(o[i] >> 8);
			ether_addr->addr_bytes[i * 2 + 1] =
				(uint8_t)(o[i] & 0xff);
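/*
 * Parse the MAC string for entry idx of the policy and store it as a
 * numeric id in pkt->vfid[idx]; the parsed address bytes and the
 * 64-bit pfid alias the same object.
 */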
set_policy_mac(struct rte_power_channel_packet *pkt, int idx, char *mac)

	/* Use port MAC address as the vfid */
	ret = str_to_ether_addr(mac, &pfid.addr);

		RTE_LOG(ERR, CHANNEL_MONITOR,
			"Invalid mac address received in JSON\n");

	printf("Received MAC Address: %02" PRIx8 ":%02" PRIx8 ":%02" PRIx8 ":"
			"%02" PRIx8 ":%02" PRIx8 ":%02" PRIx8 "\n",
			pfid.addr.addr_bytes[0], pfid.addr.addr_bytes[1],
			pfid.addr.addr_bytes[2], pfid.addr.addr_bytes[3],
			pfid.addr.addr_bytes[4], pfid.addr.addr_bytes[5]);

	pkt->vfid[idx] = pfid.pfid;
get_resource_name_from_chn_path(const char *channel_path)

	substr = strstr(channel_path, CHANNEL_MGR_FIFO_PATTERN_NAME);
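/*
 * Extract the trailing numeric id from a VM or fifo name: skip to the
 * first digit and convert from there; a return of -1 means no usable
 * number was found.
 */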
get_resource_id_from_vmname(const char *vm_name)

	while (vm_name[off] != '\0') {
		if (isdigit(vm_name[off]))

	result = atoi(&vm_name[off]);
	if ((result == 0) && (vm_name[off] != '0'))
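/*
 * Fill *pkt from a decoded JSON object. Keys handled below:
 * "policy"/"instruction" (recursed into), "command", "policy_type",
 * "workload", "busy_hours", "quiet_hours", "mac_list",
 * "avg_packet_thresh", "max_packet_thresh" and "unit".
 *
 * Purely illustrative payload (the values below are made up, not taken
 * from this file):
 *
 *   {"policy": {
 *       "command": "create",
 *       "policy_type": "TRAFFIC",
 *       "avg_packet_thresh": 300000,
 *       "max_packet_thresh": 500000,
 *       "mac_list": ["de:ad:be:ef:00:01"]
 *   }}
 */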
parse_json_to_pkt(json_t *element, struct rte_power_channel_packet *pkt,

	memset(pkt, 0, sizeof(*pkt));

	pkt->nb_mac_to_monitor = 0;
	pkt->t_boost_status.tbEnabled = false;
	pkt->workload = RTE_POWER_WL_LOW;
	pkt->policy_to_use = RTE_POWER_POLICY_TIME;
	pkt->command = RTE_POWER_PKT_POLICY;
	pkt->core_type = RTE_POWER_CORE_TYPE_PHYSICAL;

	if (vm_name == NULL) {
		RTE_LOG(ERR, CHANNEL_MONITOR,
			"vm_name is NULL, request rejected !\n");

	json_object_foreach(element, key, value) {
		if (!strcmp(key, "policy")) {
			/* Recurse in to get the contents of profile */
			ret = parse_json_to_pkt(value, pkt, vm_name);
		} else if (!strcmp(key, "instruction")) {
			/* Recurse in to get the contents of instruction */
			ret = parse_json_to_pkt(value, pkt, vm_name);
		} else if (!strcmp(key, "command")) {
			strlcpy(command, json_string_value(value), 32);
			if (!strcmp(command, "power")) {
				pkt->command = RTE_POWER_CPU_POWER;
			} else if (!strcmp(command, "create")) {
				pkt->command = RTE_POWER_PKT_POLICY;
			} else if (!strcmp(command, "destroy")) {
				pkt->command = RTE_POWER_PKT_POLICY_REMOVE;
			} else {
				RTE_LOG(ERR, CHANNEL_MONITOR,
					"Invalid command received in JSON\n");
		} else if (!strcmp(key, "policy_type")) {
			strlcpy(command, json_string_value(value), 32);
			if (!strcmp(command, "TIME")) {
				pkt->policy_to_use =
					RTE_POWER_POLICY_TIME;
			} else if (!strcmp(command, "TRAFFIC")) {
				pkt->policy_to_use =
					RTE_POWER_POLICY_TRAFFIC;
			} else if (!strcmp(command, "WORKLOAD")) {
				pkt->policy_to_use =
					RTE_POWER_POLICY_WORKLOAD;
			} else if (!strcmp(command, "BRANCH_RATIO")) {
				pkt->policy_to_use =
					RTE_POWER_POLICY_BRANCH_RATIO;
			} else {
				RTE_LOG(ERR, CHANNEL_MONITOR,
					"Wrong policy_type received in JSON\n");
		} else if (!strcmp(key, "workload")) {
			strlcpy(command, json_string_value(value), 32);
			if (!strcmp(command, "HIGH")) {
				pkt->workload = RTE_POWER_WL_HIGH;
			} else if (!strcmp(command, "MEDIUM")) {
				pkt->workload = RTE_POWER_WL_MEDIUM;
			} else if (!strcmp(command, "LOW")) {
				pkt->workload = RTE_POWER_WL_LOW;
			} else {
				RTE_LOG(ERR, CHANNEL_MONITOR,
					"Wrong workload received in JSON\n");
		} else if (!strcmp(key, "busy_hours")) {
			size_t size = json_array_size(value);

			for (i = 0; i < size; i++) {
				int hour = (int)json_integer_value(
						json_array_get(value, i));
				pkt->timer_policy.busy_hours[i] = hour;
			}
		} else if (!strcmp(key, "quiet_hours")) {
			size_t size = json_array_size(value);

			for (i = 0; i < size; i++) {
				int hour = (int)json_integer_value(
						json_array_get(value, i));
				pkt->timer_policy.quiet_hours[i] = hour;
			}
		} else if (!strcmp(key, "mac_list")) {
			size_t size = json_array_size(value);

			for (i = 0; i < size; i++) {
				strlcpy(mac,
					json_string_value(json_array_get(value, i)),
					32);
				set_policy_mac(pkt, i, mac);
			}
			pkt->nb_mac_to_monitor = size;
		} else if (!strcmp(key, "avg_packet_thresh")) {
			pkt->traffic_policy.avg_max_packet_thresh =
					(uint32_t)json_integer_value(value);
		} else if (!strcmp(key, "max_packet_thresh")) {
			pkt->traffic_policy.max_max_packet_thresh =
					(uint32_t)json_integer_value(value);
		} else if (!strcmp(key, "unit")) {
			strlcpy(unit, json_string_value(value), 32);
			if (!strcmp(unit, "SCALE_UP")) {
				pkt->unit = RTE_POWER_SCALE_UP;
			} else if (!strcmp(unit, "SCALE_DOWN")) {
				pkt->unit = RTE_POWER_SCALE_DOWN;
			} else if (!strcmp(unit, "SCALE_MAX")) {
				pkt->unit = RTE_POWER_SCALE_MAX;
			} else if (!strcmp(unit, "SCALE_MIN")) {
				pkt->unit = RTE_POWER_SCALE_MIN;
			} else if (!strcmp(unit, "ENABLE_TURBO")) {
				pkt->unit = RTE_POWER_ENABLE_TURBO;
			} else if (!strcmp(unit, "DISABLE_TURBO")) {
				pkt->unit = RTE_POWER_DISABLE_TURBO;
			} else {
				RTE_LOG(ERR, CHANNEL_MONITOR,
					"Invalid unit received in JSON\n");
		} else {
			RTE_LOG(ERR, CHANNEL_MONITOR,
				"Unknown key received in JSON string: %s\n",
				key);
	resource_id = get_resource_id_from_vmname(vm_name);
	if (resource_id < 0) {
		RTE_LOG(ERR, CHANNEL_MONITOR,
			"Could not get resource_id from vm_name:%s\n",
			vm_name);
		return -1;
	}

	strlcpy(pkt->vm_name, vm_name, RTE_POWER_VM_MAX_NAME_SZ);
	pkt->resource_id = resource_id;
void channel_monitor_exit(void)

	rte_free(global_events_list);
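/*
 * Mark vcpu slot z of policy pNo as shared when another VM (lvm_info[x])
 * also maps one of its vcpus onto the same physical core, and pin that
 * core to its maximum frequency.
 */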
core_share(int pNo, int z, int x, int t)

	if (policies[pNo].core_share[z].pcpu == lvm_info[x].pcpus[t]) {
		if (strcmp(policies[pNo].pkt.vm_name,
				lvm_info[x].vm_name) != 0) {
			policies[pNo].core_share[z].status = 1;
			power_manager_scale_core_max(
					policies[pNo].core_share[z].pcpu);
core_share_status(int pNo)

	int noVms = 0, noVcpus = 0, z, x, t;

	get_all_vm(&noVms, &noVcpus);

	/* Reset Core Share Status. */
	for (z = 0; z < noVcpus; z++)
		policies[pNo].core_share[z].status = 0;

	/* Foreach vcpu in a policy. */
	for (z = 0; z < policies[pNo].pkt.num_vcpu; z++) {
		/* Foreach VM on the platform. */
		for (x = 0; x < noVms; x++) {
			/* Foreach vcpu of VMs on platform. */
			for (t = 0; t < lvm_info[x].num_cpus; t++)
				core_share(pNo, z, x, t);
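/*
 * Register one physical core for a policy: BRANCH_RATIO policies hand
 * the core to the out-of-band monitor, all others just record it in the
 * policy's core_share list.
 */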
pcpu_monitor(struct policy *pol, struct core_info *ci, int pcpu, int count)

	if (pol->pkt.policy_to_use == RTE_POWER_POLICY_BRANCH_RATIO) {
		ci->cd[pcpu].oob_enabled = 1;
		ret = add_core_to_monitor(pcpu);
		if (ret == 0)
			RTE_LOG(INFO, CHANNEL_MONITOR,
					"Monitoring pcpu %d OOB for %s\n",
					pcpu, pol->pkt.vm_name);
		else
			RTE_LOG(ERR, CHANNEL_MONITOR,
					"Error monitoring pcpu %d OOB for %s\n",
					pcpu, pol->pkt.vm_name);
	} else {
		pol->core_share[count].pcpu = pcpu;
		RTE_LOG(INFO, CHANNEL_MONITOR,
				"Monitoring pcpu %d for %s\n",
				pcpu, pol->pkt.vm_name);
	}
get_pcpu_to_control(struct policy *pol)

	/* Convert vcpu to pcpu. */
	struct core_info *ci;

	ci = get_core_info();

	RTE_LOG(DEBUG, CHANNEL_MONITOR,
			"Looking for pcpu for %s\n", pol->pkt.vm_name);

	/*
	 * So now that we're handling virtual and physical cores, we need to
	 * differentiate between them when adding them to the branch monitor.
	 * Virtual cores need to be converted to physical cores.
	 */
	if (pol->pkt.core_type == RTE_POWER_CORE_TYPE_VIRTUAL) {
		/*
		 * If the cores in the policy are virtual, we need to map them
		 * to physical cores. We look up the vm info and use that for
		 * the mapping.
		 */
		get_info_vm(pol->pkt.vm_name, &info);
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			pcpu = info.pcpu_map[pol->pkt.vcpu_to_control[count]];
			pcpu_monitor(pol, ci, pcpu, count);
		}
	} else {
		/*
		 * If the cores in the policy are physical, we just use
		 * those core IDs directly.
		 */
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			pcpu = pol->pkt.vcpu_to_control[count];
			pcpu_monitor(pol, ci, pcpu, count);
		}
	}
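/*
 * Resolve each monitored MAC address to an i40e VF id by querying every
 * attached ethdev; the id is kept in the policy for later per-VF
 * statistics lookups (see get_pkt_diff()).
 */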
get_pfid(struct policy *pol)

	for (i = 0; i < pol->pkt.nb_mac_to_monitor; i++) {

		RTE_ETH_FOREACH_DEV(x) {
			ret = rte_pmd_i40e_query_vfid_by_mac(x,
				(struct rte_ether_addr *)&(pol->pkt.vfid[i]));
			if (ret != -EINVAL) {

		if (ret == -EINVAL || ret == -ENOTSUP || ret == -ENODEV) {
			RTE_LOG(INFO, CHANNEL_MONITOR,
				"Error with Policy. MAC not found on "
update_policy(struct rte_power_channel_packet *pkt)

	unsigned int updated = 0;

	RTE_LOG(INFO, CHANNEL_MONITOR,
			"Applying policy for %s\n", pkt->vm_name);

	for (i = 0; i < RTE_DIM(policies); i++) {
		if (strcmp(policies[i].pkt.vm_name, pkt->vm_name) == 0) {
			/* Copy the contents of *pkt into the policy.pkt */
			policies[i].pkt = *pkt;
			get_pcpu_to_control(&policies[i]);
			/* Check Eth dev only for Traffic policy */
			if (policies[i].pkt.policy_to_use ==
					RTE_POWER_POLICY_TRAFFIC) {
				if (get_pfid(&policies[i]) < 0) {

			core_share_status(i);
			policies[i].enabled = 1;

	for (i = 0; i < RTE_DIM(policies); i++) {
		if (policies[i].enabled == 0) {
			policies[i].pkt = *pkt;
			get_pcpu_to_control(&policies[i]);
			/* Check Eth dev only for Traffic policy */
			if (policies[i].pkt.policy_to_use ==
					RTE_POWER_POLICY_TRAFFIC) {
				if (get_pfid(&policies[i]) < 0) {

			core_share_status(i);
			policies[i].enabled = 1;
remove_policy(struct rte_power_channel_packet *pkt)

	/*
	 * Disabling the policy is simply a case of setting
	 * enabled to 0
	 */
	for (i = 0; i < RTE_DIM(policies); i++) {
		if (strcmp(policies[i].pkt.vm_name, pkt->vm_name) == 0) {
			policies[i].enabled = 0;
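/*
 * Return the receive rate (packets per second) summed over all VFs the
 * policy monitors, derived from the i40e VF counters and the TSC delta
 * since the previous call.
 */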
get_pkt_diff(struct policy *pol)

	uint64_t vsi_pkt_count,
		vsi_pkt_total = 0,
		vsi_pkt_count_prev_total = 0;
	double rdtsc_curr, rdtsc_diff, diff;
	struct rte_eth_stats vf_stats;

	for (x = 0; x < pol->pkt.nb_mac_to_monitor; x++) {

		if (rte_pmd_i40e_get_vf_stats(x, pol->pfid[x], &vf_stats) == 0)
			vsi_pkt_count = vf_stats.ipackets;

		vsi_pkt_total += vsi_pkt_count;

		vsi_pkt_count_prev_total += vsi_pkt_count_prev[pol->pfid[x]];
		vsi_pkt_count_prev[pol->pfid[x]] = vsi_pkt_count;
	}

	rdtsc_curr = rte_rdtsc_precise();
	rdtsc_diff = rdtsc_curr - rdtsc_prev[pol->pfid[x-1]];
	rdtsc_prev[pol->pfid[x-1]] = rdtsc_curr;

	diff = (vsi_pkt_total - vsi_pkt_count_prev_total) *
		((double)rte_get_tsc_hz() / rdtsc_diff);
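/*
 * Scale the policy's cores according to the measured packet rate:
 * max above the max threshold, medium above the average threshold,
 * otherwise min. Cores marked as shared (status == 1) are left alone.
 */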
apply_traffic_profile(struct policy *pol)

	diff = get_pkt_diff(pol);

	if (diff >= (pol->pkt.traffic_policy.max_max_packet_thresh)) {
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			if (pol->core_share[count].status != 1)
				power_manager_scale_core_max(
						pol->core_share[count].pcpu);
		}
	} else if (diff >= (pol->pkt.traffic_policy.avg_max_packet_thresh)) {
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			if (pol->core_share[count].status != 1)
				power_manager_scale_core_med(
						pol->core_share[count].pcpu);
		}
	} else if (diff < (pol->pkt.traffic_policy.avg_max_packet_thresh)) {
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			if (pol->core_share[count].status != 1)
				power_manager_scale_core_min(
						pol->core_share[count].pcpu);
		}
	}
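/*
 * Time-of-day policy: scale to max during configured busy hours, to min
 * during quiet hours, and defer to the traffic profile during the hours
 * listed in hours_to_use_traffic_profile.
 */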
apply_time_profile(struct policy *pol)

	char time_string[40];

	/* Obtain the time of day, and convert it to a tm struct. */
	gettimeofday(&tv, NULL);
	ptm = localtime(&tv.tv_sec);
	/* Format the date and time, down to a single second. */
	strftime(time_string, sizeof(time_string), "%Y-%m-%d %H:%M:%S", ptm);

	for (x = 0; x < RTE_POWER_HOURS_PER_DAY; x++) {

		if (ptm->tm_hour == pol->pkt.timer_policy.busy_hours[x]) {
			for (count = 0; count < pol->pkt.num_vcpu; count++) {
				if (pol->core_share[count].status != 1) {
					power_manager_scale_core_max(
						pol->core_share[count].pcpu);
				}
			}
		} else if (ptm->tm_hour ==
				pol->pkt.timer_policy.quiet_hours[x]) {
			for (count = 0; count < pol->pkt.num_vcpu; count++) {
				if (pol->core_share[count].status != 1) {
					power_manager_scale_core_min(
						pol->core_share[count].pcpu);
				}
			}
		} else if (ptm->tm_hour ==
				pol->pkt.timer_policy.hours_to_use_traffic_profile[x]) {
			apply_traffic_profile(pol);
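/*
 * Static profile: scale all of the policy's (non-shared) cores to max,
 * medium or min according to the workload level declared in the policy.
 */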
apply_workload_profile(struct policy *pol)

	if (pol->pkt.workload == RTE_POWER_WL_HIGH) {
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			if (pol->core_share[count].status != 1)
				power_manager_scale_core_max(
						pol->core_share[count].pcpu);
		}
	} else if (pol->pkt.workload == RTE_POWER_WL_MEDIUM) {
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			if (pol->core_share[count].status != 1)
				power_manager_scale_core_med(
						pol->core_share[count].pcpu);
		}
	} else if (pol->pkt.workload == RTE_POWER_WL_LOW) {
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			if (pol->core_share[count].status != 1)
				power_manager_scale_core_min(
						pol->core_share[count].pcpu);
		}
	}
apply_policy(struct policy *pol)

	struct rte_power_channel_packet *pkt = &pol->pkt;

	/* Check policy to use */
	if (pkt->policy_to_use == RTE_POWER_POLICY_TRAFFIC)
		apply_traffic_profile(pol);
	else if (pkt->policy_to_use == RTE_POWER_POLICY_TIME)
		apply_time_profile(pol);
	else if (pkt->policy_to_use == RTE_POWER_POLICY_WORKLOAD)
		apply_workload_profile(pol);
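/*
 * Write buffer_len bytes of *buffer to the channel fd, looping until
 * the whole packet has been written; write failures are logged and
 * abort the send.
 */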
write_binary_packet(void *buffer,
		size_t buffer_len,
		struct channel_info *chan_info)

	if (buffer_len == 0 || buffer == NULL)
		return -1;

	if (chan_info->fd < 0) {
		RTE_LOG(ERR, CHANNEL_MONITOR, "Channel is not connected\n");
		return -1;
	}

	while (buffer_len > 0) {
		ret = write(chan_info->fd, buffer, buffer_len);
			RTE_LOG(ERR, CHANNEL_MONITOR, "Write function failed due to %s.\n",
					strerror(errno));
		buffer = (char *)buffer + ret;
send_freq(struct rte_power_channel_packet *pkt,
		struct channel_info *chan_info,
		bool freq_list)

	unsigned int vcore_id = pkt->resource_id;
	struct rte_power_channel_packet_freq_list channel_pkt_freq_list;

	if (get_info_vm(pkt->vm_name, &info) != 0)
		return -1;

	if (!freq_list && vcore_id >= RTE_POWER_MAX_VCPU_PER_VM)
		return -1;

	if (!info.allow_query)
		return -1;

	channel_pkt_freq_list.command = RTE_POWER_FREQ_LIST;
	channel_pkt_freq_list.num_vcpu = info.num_vcpus;

	if (freq_list) {
		for (i = 0; i < info.num_vcpus; i++)
			channel_pkt_freq_list.freq_list[i] =
				power_manager_get_current_frequency(info.pcpu_map[i]);
	} else {
		channel_pkt_freq_list.freq_list[vcore_id] =
			power_manager_get_current_frequency(info.pcpu_map[vcore_id]);
	}

	return write_binary_packet(&channel_pkt_freq_list,
			sizeof(channel_pkt_freq_list),
			chan_info);
send_capabilities(struct rte_power_channel_packet *pkt,
		struct channel_info *chan_info,
		bool list_requested)

	unsigned int vcore_id = pkt->resource_id;
	struct rte_power_channel_packet_caps_list channel_pkt_caps_list;
	struct rte_power_core_capabilities caps;

	if (get_info_vm(pkt->vm_name, &info) != 0)
		return -1;

	if (!list_requested && vcore_id >= RTE_POWER_MAX_VCPU_PER_VM)
		return -1;

	if (!info.allow_query)
		return -1;

	channel_pkt_caps_list.command = RTE_POWER_CAPS_LIST;
	channel_pkt_caps_list.num_vcpu = info.num_vcpus;

	if (list_requested) {
		for (i = 0; i < info.num_vcpus; i++) {
			ret = rte_power_get_capabilities(info.pcpu_map[i],
					&caps);
				channel_pkt_caps_list.turbo[i] =
						caps.turbo;
				channel_pkt_caps_list.priority[i] =
						caps.priority;
	} else {
		ret = rte_power_get_capabilities(info.pcpu_map[vcore_id],
				&caps);
			channel_pkt_caps_list.turbo[vcore_id] =
					caps.turbo;
			channel_pkt_caps_list.priority[vcore_id] =
					caps.priority;
	}

	return write_binary_packet(&channel_pkt_caps_list,
			sizeof(channel_pkt_caps_list),
			chan_info);
send_ack_for_received_cmd(struct rte_power_channel_packet *pkt,
		struct channel_info *chan_info,
		uint32_t command)

	pkt->command = command;
	return write_binary_packet(pkt,
			sizeof(*pkt),
			chan_info);
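/*
 * Dispatch one received packet: RTE_POWER_CPU_POWER commands scale the
 * selected core and are acknowledged, RTE_POWER_PKT_POLICY packets are
 * acknowledged, RTE_POWER_PKT_POLICY_REMOVE disables the matching policy,
 * and the query commands reply with frequency or capability information.
 * The channel is flipped to PROCESSING for the duration so the management
 * thread does not tear it down mid-request.
 */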
process_request(struct rte_power_channel_packet *pkt,
		struct channel_info *chan_info)

	if (chan_info == NULL)
		return -1;

	if (rte_atomic32_cmpset(&(chan_info->status), CHANNEL_MGR_CHANNEL_CONNECTED,
			CHANNEL_MGR_CHANNEL_PROCESSING) == 0)
		return -1;

	if (pkt->command == RTE_POWER_CPU_POWER) {
		unsigned int core_num;

		if (pkt->core_type == RTE_POWER_CORE_TYPE_VIRTUAL)
			core_num = get_pcpu(chan_info, pkt->resource_id);
		else
			core_num = pkt->resource_id;

		RTE_LOG(DEBUG, CHANNEL_MONITOR, "Processing requested cmd for cpu:%d\n",
				core_num);

		bool valid_unit = true;

		switch (pkt->unit) {
		case(RTE_POWER_SCALE_MIN):
			scale_res = power_manager_scale_core_min(core_num);
			break;
		case(RTE_POWER_SCALE_MAX):
			scale_res = power_manager_scale_core_max(core_num);
			break;
		case(RTE_POWER_SCALE_DOWN):
			scale_res = power_manager_scale_core_down(core_num);
			break;
		case(RTE_POWER_SCALE_UP):
			scale_res = power_manager_scale_core_up(core_num);
			break;
		case(RTE_POWER_ENABLE_TURBO):
			scale_res = power_manager_enable_turbo_core(core_num);
			break;
		case(RTE_POWER_DISABLE_TURBO):
			scale_res = power_manager_disable_turbo_core(core_num);
			break;

			ret = send_ack_for_received_cmd(pkt,

				RTE_LOG(ERR, CHANNEL_MONITOR, "Error during sending ack command.\n");

			RTE_LOG(ERR, CHANNEL_MONITOR, "Unexpected unit type.\n");
	}

	if (pkt->command == RTE_POWER_PKT_POLICY) {
		RTE_LOG(INFO, CHANNEL_MONITOR, "Processing policy request %s\n",
				pkt->vm_name);
		int ret = send_ack_for_received_cmd(pkt,

			RTE_LOG(ERR, CHANNEL_MONITOR, "Error during sending ack command.\n");
	}

	if (pkt->command == RTE_POWER_PKT_POLICY_REMOVE) {
		ret = remove_policy(pkt);
		if (ret == 0)
			RTE_LOG(INFO, CHANNEL_MONITOR,
				"Removed policy %s\n", pkt->vm_name);
		else
			RTE_LOG(INFO, CHANNEL_MONITOR,
				"Policy %s does not exist\n", pkt->vm_name);
	}

	if (pkt->command == RTE_POWER_QUERY_FREQ_LIST ||
		pkt->command == RTE_POWER_QUERY_FREQ) {

		RTE_LOG(INFO, CHANNEL_MONITOR,
			"Frequency for %s requested.\n", pkt->vm_name);
		int ret = send_freq(pkt,
				chan_info,
				pkt->command == RTE_POWER_QUERY_FREQ_LIST);

			RTE_LOG(ERR, CHANNEL_MONITOR, "Error during frequency sending.\n");
	}

	if (pkt->command == RTE_POWER_QUERY_CAPS_LIST ||
		pkt->command == RTE_POWER_QUERY_CAPS) {

		RTE_LOG(INFO, CHANNEL_MONITOR,
			"Capabilities for %s requested.\n", pkt->vm_name);
		int ret = send_capabilities(pkt,
				chan_info,
				pkt->command == RTE_POWER_QUERY_CAPS_LIST);

			RTE_LOG(ERR, CHANNEL_MONITOR, "Error during sending capabilities.\n");
	}

	/*
	 * Return is not checked as channel status may have been set to DISABLED
	 * from management thread
	 */
	rte_atomic32_cmpset(&(chan_info->status), CHANNEL_MGR_CHANNEL_PROCESSING,
			CHANNEL_MGR_CHANNEL_CONNECTED);
add_channel_to_monitor(struct channel_info **chan_info)

	struct channel_info *info = *chan_info;
	struct epoll_event event;

	event.events = EPOLLIN;
	event.data.ptr = info;
	if (epoll_ctl(global_event_fd, EPOLL_CTL_ADD, info->fd, &event) < 0) {
		RTE_LOG(ERR, CHANNEL_MONITOR, "Unable to add channel '%s' "
				"to epoll\n", info->channel_path);
	RTE_LOG(INFO, CHANNEL_MONITOR, "Added channel '%s' "
			"to monitor\n", info->channel_path);
remove_channel_from_monitor(struct channel_info *chan_info)

	if (epoll_ctl(global_event_fd, EPOLL_CTL_DEL,
			chan_info->fd, NULL) < 0) {
		RTE_LOG(ERR, CHANNEL_MONITOR, "Unable to remove channel '%s' "
				"from epoll\n", chan_info->channel_path);
channel_monitor_init(void)

	global_event_fd = epoll_create1(0);
	if (global_event_fd < 0) {
		RTE_LOG(ERR, CHANNEL_MONITOR,
			"Error creating epoll context with error %s\n",
			strerror(errno));
	global_events_list = rte_malloc("epoll_events",
			sizeof(*global_events_list)
			* MAX_EVENTS, RTE_CACHE_LINE_SIZE);
	if (global_events_list == NULL) {
		RTE_LOG(ERR, CHANNEL_MONITOR, "Unable to rte_malloc for "
read_binary_packet(struct channel_info *chan_info)

	struct rte_power_channel_packet pkt;
	void *buffer = &pkt;
	int buffer_len = sizeof(pkt);
	int n_bytes, err = 0;

	while (buffer_len > 0) {
		n_bytes = read(chan_info->fd,
				buffer, buffer_len);
		if (n_bytes == buffer_len)
			break;

			RTE_LOG(DEBUG, CHANNEL_MONITOR,
				"Received error on "
				"channel '%s' read: %s\n",
				chan_info->channel_path,
				strerror(err));
			remove_channel(&chan_info);

		buffer = (char *)buffer + n_bytes;
		buffer_len -= n_bytes;
	}

	process_request(&pkt, chan_info);
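/*
 * Read one brace-delimited JSON object from the channel fifo into
 * json_data, decode it with Jansson and turn it into a channel packet.
 * For the per-core fifo channels the core id is taken from the fifo
 * name rather than from the JSON body.
 */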
read_json_packet(struct channel_info *chan_info)

	struct rte_power_channel_packet pkt;
	const char *resource_name;

	/* read opening brace to closing brace */
		n_bytes = read(chan_info->fd, &json_data[idx], 1);
		if (json_data[idx] == '{')
			indent++;
		if (json_data[idx] == '}')
			indent--;
		if ((indent > 0) || (idx > 0))
			idx++;
		if (idx >= MAX_JSON_STRING_LEN-1)
			break;
	} while (indent > 0);

	json_data[idx] = '\0';

	if (strlen(json_data) == 0)
		continue;

	printf("got [%s]\n", json_data);

	root = json_loads(json_data, 0, &error);

	if (root) {
		resource_name = get_resource_name_from_chn_path(
				chan_info->channel_path);
		/*
		 * Because our data is now in the json
		 * object, we can overwrite the pkt
		 * with a rte_power_channel_packet struct, using
		 * parse_json_to_pkt()
		 */
		ret = parse_json_to_pkt(root, &pkt, resource_name);
			RTE_LOG(ERR, CHANNEL_MONITOR,
				"Error validating JSON profile data\n");
		start = strstr(pkt.vm_name,
				CHANNEL_MGR_FIFO_PATTERN_NAME);
		if (start != NULL) {
			/* move past pattern to start of fifo id */
			start += strlen(CHANNEL_MGR_FIFO_PATTERN_NAME);

			n = (uint32_t)strtoul(start, &end, 10);

			if (end[0] == '\0') {
				/* Add core id to core list */
				pkt.vcpu_to_control[0] = n;
				process_request(&pkt, chan_info);
			} else {
				RTE_LOG(ERR, CHANNEL_MONITOR,
					"Cannot extract core id from fifo name\n");
			}
		} else {
			process_request(&pkt, chan_info);
		}
	} else {
		RTE_LOG(ERR, CHANNEL_MONITOR,
			"JSON error on line %d: %s\n",
			error.line, error.text);
	}
	} while (n_bytes > 0);
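/*
 * Main monitor loop: wait on the epoll context, drop channels whose
 * remote end hung up, read binary or JSON packets depending on the
 * channel type, and periodically re-apply every enabled policy.
 */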
run_channel_monitor(void)

		n_events = epoll_wait(global_event_fd, global_events_list,

		for (i = 0; i < n_events; i++) {
			struct channel_info *chan_info = (struct channel_info *)
					global_events_list[i].data.ptr;
			if ((global_events_list[i].events & EPOLLERR) ||
					(global_events_list[i].events & EPOLLHUP)) {
				RTE_LOG(INFO, CHANNEL_MONITOR,
						"Remote closed connection for "
						chan_info->channel_path);
				remove_channel(&chan_info);
			}
			if (global_events_list[i].events & EPOLLIN) {

				switch (chan_info->type) {
				case CHANNEL_TYPE_BINARY:
					read_binary_packet(chan_info);
					break;
				case CHANNEL_TYPE_JSON:
					read_json_packet(chan_info);
					break;
				}
			}
		}

		rte_delay_us(time_period_ms * 1000);
		if (policy_is_set) {

			for (j = 0; j < RTE_DIM(policies); j++) {
				if (policies[j].enabled == 1)
					apply_policy(&policies[j]);