1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
13 #include <sys/types.h>
14 #include <sys/epoll.h>
15 #include <sys/queue.h>
17 #include <sys/socket.h>
18 #include <sys/select.h>
22 #pragma message "Jansson dev libs unavailable, not including JSON parsing"
24 #include <rte_string_fns.h>
26 #include <rte_memory.h>
27 #include <rte_malloc.h>
28 #include <rte_atomic.h>
29 #include <rte_cycles.h>
30 #include <rte_ethdev.h>
31 #ifdef RTE_LIBRTE_I40E_PMD
32 #include <rte_pmd_i40e.h>
35 #include <libvirt/libvirt.h>
36 #include "channel_monitor.h"
37 #include "channel_commands.h"
38 #include "channel_manager.h"
39 #include "power_manager.h"
40 #include "oob_monitor.h"
42 #define RTE_LOGTYPE_CHANNEL_MONITOR RTE_LOGTYPE_USER1
44 #define MAX_EVENTS 256
46 uint64_t vsi_pkt_count_prev[384];
47 uint64_t rdtsc_prev[384];
48 #define MAX_JSON_STRING_LEN 1024
49 char json_data[MAX_JSON_STRING_LEN];
51 double time_period_ms = 1;
52 static volatile unsigned run_loop = 1;
53 static int global_event_fd;
54 static unsigned int policy_is_set;
55 static struct epoll_event *global_events_list;
56 static struct policy policies[RTE_MAX_LCORE];
61 struct rte_ether_addr addr;
66 str_to_ether_addr(const char *a, struct rte_ether_addr *ether_addr)
70 unsigned long o[RTE_ETHER_ADDR_LEN];
75 o[i] = strtoul(a, &end, 16);
76 if (errno != 0 || end == a || (end[0] != ':' && end[0] != 0))
79 } while (++i != RTE_DIM(o) / sizeof(o[0]) && end[0] != 0);
81 /* Junk at the end of line */
85 /* Support the format XX:XX:XX:XX:XX:XX */
86 if (i == RTE_ETHER_ADDR_LEN) {
90 ether_addr->addr_bytes[i] = (uint8_t)o[i];
92 /* Support the format XXXX:XXXX:XXXX */
93 } else if (i == RTE_ETHER_ADDR_LEN / 2) {
95 if (o[i] > UINT16_MAX)
97 ether_addr->addr_bytes[i * 2] =
99 ether_addr->addr_bytes[i * 2 + 1] =
100 (uint8_t)(o[i] & 0xff);
/*
 * set_policy_mac - parse the textual MAC 'mac' and store the resulting
 * id in pkt->vfid[idx]. 'pfid' appears to be a union overlaying a
 * rte_ether_addr with a 64-bit pfid (declaration partly outside this
 * excerpt - confirm), so writing the address fills the id.
 * NOTE(review): declarations, braces and the error-return after the
 * RTE_LOG below are elided in this excerpt.
 */
set_policy_mac(struct channel_packet *pkt, int idx, char *mac)

	/* Use port MAC address as the vfid */
	ret = str_to_ether_addr(mac, &pfid.addr);
	/* Parse failure: reject the request received over JSON. */
		RTE_LOG(ERR, CHANNEL_MONITOR,
			"Invalid mac address received in JSON\n");

	printf("Received MAC Address: %02" PRIx8 ":%02" PRIx8 ":%02" PRIx8 ":"
	       "%02" PRIx8 ":%02" PRIx8 ":%02" PRIx8 "\n",
			pfid.addr.addr_bytes[0], pfid.addr.addr_bytes[1],
			pfid.addr.addr_bytes[2], pfid.addr.addr_bytes[3],
			pfid.addr.addr_bytes[4], pfid.addr.addr_bytes[5]);

	/* Publish the packed 64-bit id into the policy packet. */
	pkt->vfid[idx] = pfid.pfid;
/*
 * get_resource_name_from_chn_path - derive the resource name from a
 * channel path by locating the FIFO name pattern inside it.
 * NOTE(review): the return statement(s) are elided in this excerpt;
 * presumably the text following the pattern is returned - confirm.
 */
get_resource_name_from_chn_path(const char *channel_path)

	substr = strstr(channel_path, CHANNEL_MGR_FIFO_PATTERN_NAME);
/*
 * get_resource_id_from_vmname - extract the trailing decimal id from a
 * VM/resource name (e.g. "vm12" -> 12). Scans forward to the first
 * digit (loop exit elided), then converts with atoi(); since atoi()
 * cannot report errors, a 0 result is only trusted when the character
 * really is '0'.
 * NOTE(review): isdigit() is given a plain char; a cast to unsigned
 * char would be strictly correct for negative chars - confirm intent.
 */
get_resource_id_from_vmname(const char *vm_name)

	while (vm_name[off] != '\0') {
		if (isdigit(vm_name[off]))
	result = atoi(&vm_name[off]);
	if ((result == 0) && (vm_name[off] != '0'))
/*
 * parse_json_to_pkt - translate a parsed Jansson object into a
 * channel_packet. Defaults are installed first (TIME policy,
 * PKT_POLICY command, physical cores); each recognised key then
 * overrides the corresponding field. The "policy" and "instruction"
 * keys recurse into their nested object with the same packet.
 * NOTE(review): this excerpt is elided - the vm_name parameter line,
 * error returns, closing braces and several else-branches are missing.
 * NOTE(review): json_string_value() returns NULL for non-string JSON
 * values; the strlcpy() calls below assume string-typed input - the
 * guarding type checks may be on elided lines; confirm upstream.
 */
parse_json_to_pkt(json_t *element, struct channel_packet *pkt,

	/* Start from a clean packet, then set defaults. */
	memset(pkt, 0, sizeof(struct channel_packet));

	pkt->nb_mac_to_monitor = 0;
	pkt->t_boost_status.tbEnabled = false;
	pkt->policy_to_use = TIME;
	pkt->command = PKT_POLICY;
	pkt->core_type = CORE_TYPE_PHYSICAL;

	if (vm_name == NULL) {
		RTE_LOG(ERR, CHANNEL_MONITOR,
			"vm_name is NULL, request rejected !\n");

	json_object_foreach(element, key, value) {
		if (!strcmp(key, "policy")) {
			/* Recurse in to get the contents of profile */
			ret = parse_json_to_pkt(value, pkt, vm_name);
		} else if (!strcmp(key, "instruction")) {
			/* Recurse in to get the contents of instruction */
			ret = parse_json_to_pkt(value, pkt, vm_name);
		} else if (!strcmp(key, "command")) {
			strlcpy(command, json_string_value(value), 32);
			if (!strcmp(command, "power")) {
				pkt->command = CPU_POWER;
			} else if (!strcmp(command, "create")) {
				pkt->command = PKT_POLICY;
			} else if (!strcmp(command, "destroy")) {
				pkt->command = PKT_POLICY_REMOVE;
			/* Fallthrough error branch (else line elided). */
				RTE_LOG(ERR, CHANNEL_MONITOR,
					"Invalid command received in JSON\n");
		} else if (!strcmp(key, "policy_type")) {
			strlcpy(command, json_string_value(value), 32);
			if (!strcmp(command, "TIME")) {
				pkt->policy_to_use = TIME;
			} else if (!strcmp(command, "TRAFFIC")) {
				pkt->policy_to_use = TRAFFIC;
			} else if (!strcmp(command, "WORKLOAD")) {
				pkt->policy_to_use = WORKLOAD;
			} else if (!strcmp(command, "BRANCH_RATIO")) {
				pkt->policy_to_use = BRANCH_RATIO;
			/* Unknown policy type (else line elided). */
				RTE_LOG(ERR, CHANNEL_MONITOR,
					"Wrong policy_type received in JSON\n");
		} else if (!strcmp(key, "workload")) {
			strlcpy(command, json_string_value(value), 32);
			if (!strcmp(command, "HIGH")) {
				pkt->workload = HIGH;
			} else if (!strcmp(command, "MEDIUM")) {
				pkt->workload = MEDIUM;
			} else if (!strcmp(command, "LOW")) {
			/* LOW assignment and error branch elided here. */
				RTE_LOG(ERR, CHANNEL_MONITOR,
					"Wrong workload received in JSON\n");
		} else if (!strcmp(key, "busy_hours")) {
			size_t size = json_array_size(value);

			for (i = 0; i < size; i++) {
				int hour = (int)json_integer_value(
						json_array_get(value, i));
				pkt->timer_policy.busy_hours[i] = hour;
		} else if (!strcmp(key, "quiet_hours")) {
			size_t size = json_array_size(value);

			for (i = 0; i < size; i++) {
				int hour = (int)json_integer_value(
						json_array_get(value, i));
				pkt->timer_policy.quiet_hours[i] = hour;
		} else if (!strcmp(key, "mac_list")) {
			size_t size = json_array_size(value);

			for (i = 0; i < size; i++) {
				/* copy of the array element into 'mac' elided */
				json_string_value(json_array_get(value, i)),
				set_policy_mac(pkt, i, mac);
			pkt->nb_mac_to_monitor = size;
		} else if (!strcmp(key, "avg_packet_thresh")) {
			pkt->traffic_policy.avg_max_packet_thresh =
					(uint32_t)json_integer_value(value);
		} else if (!strcmp(key, "max_packet_thresh")) {
			pkt->traffic_policy.max_max_packet_thresh =
					(uint32_t)json_integer_value(value);
		} else if (!strcmp(key, "unit")) {
			strlcpy(unit, json_string_value(value), 32);
			if (!strcmp(unit, "SCALE_UP")) {
				pkt->unit = CPU_POWER_SCALE_UP;
			} else if (!strcmp(unit, "SCALE_DOWN")) {
				pkt->unit = CPU_POWER_SCALE_DOWN;
			} else if (!strcmp(unit, "SCALE_MAX")) {
				pkt->unit = CPU_POWER_SCALE_MAX;
			} else if (!strcmp(unit, "SCALE_MIN")) {
				pkt->unit = CPU_POWER_SCALE_MIN;
			} else if (!strcmp(unit, "ENABLE_TURBO")) {
				pkt->unit = CPU_POWER_ENABLE_TURBO;
			} else if (!strcmp(unit, "DISABLE_TURBO")) {
				pkt->unit = CPU_POWER_DISABLE_TURBO;
			/* Unknown unit (else line elided). */
				RTE_LOG(ERR, CHANNEL_MONITOR,
					"Invalid command received in JSON\n");
		/* Unrecognised top-level key (else line elided). */
			RTE_LOG(ERR, CHANNEL_MONITOR,
				"Unknown key received in JSON string: %s\n",

	/* Finally stamp the packet with the resource id and VM name. */
	resource_id = get_resource_id_from_vmname(vm_name);
	if (resource_id < 0) {
		RTE_LOG(ERR, CHANNEL_MONITOR,
			"Could not get resource_id from vm_name:%s\n",

	strlcpy(pkt->vm_name, vm_name, VM_MAX_NAME_SZ);
	pkt->resource_id = resource_id;
/*
 * channel_monitor_exit - release monitor resources; frees the epoll
 * event array allocated in channel_monitor_init().
 * NOTE(review): the lines that normally stop the monitor loop before
 * freeing are elided in this excerpt - confirm ordering upstream.
 */
void channel_monitor_exit(void)
	rte_free(global_events_list);
/*
 * core_share - if vcpu slot 'z' of policy 'pNo' maps onto the same
 * physical cpu as vcpu 't' of VM 'x', and that VM is NOT the policy's
 * own VM, flag the pcpu as shared (status = 1) and pin it to max
 * frequency so the other tenant is never throttled by this policy.
 */
core_share(int pNo, int z, int x, int t)

	if (policies[pNo].core_share[z].pcpu == lvm_info[x].pcpus[t]) {
		if (strcmp(policies[pNo].pkt.vm_name,
				lvm_info[x].vm_name) != 0) {
			policies[pNo].core_share[z].status = 1;
			power_manager_scale_core_max(
					policies[pNo].core_share[z].pcpu);
/*
 * core_share_status - recompute the shared-core flags for policy 'pNo'
 * by comparing each of its vcpus against every vcpu of every VM known
 * to the channel manager (lvm_info populated via get_all_vm()).
 */
core_share_status(int pNo)

	int noVms = 0, noVcpus = 0, z, x, t;

	get_all_vm(&noVms, &noVcpus);

	/* Reset Core Share Status. */
	for (z = 0; z < noVcpus; z++)
		policies[pNo].core_share[z].status = 0;

	/* Foreach vcpu in a policy. */
	for (z = 0; z < policies[pNo].pkt.num_vcpu; z++) {
		/* Foreach VM on the platform. */
		for (x = 0; x < noVms; x++) {
			/* Foreach vcpu of VMs on platform. */
			for (t = 0; t < lvm_info[x].num_cpus; t++)
				core_share(pNo, z, x, t);
/*
 * pcpu_monitor - record physical cpu 'pcpu' as slot 'count' of the
 * policy's core-share table. For BRANCH_RATIO policies the core is
 * additionally registered with the out-of-band branch monitor and
 * marked oob_enabled in the core_info table.
 * NOTE(review): the branch structure between the two RTE_LOGs below
 * (success vs failure of add_core_to_monitor) is elided here.
 */
pcpu_monitor(struct policy *pol, struct core_info *ci, int pcpu, int count)

	if (pol->pkt.policy_to_use == BRANCH_RATIO) {
		ci->cd[pcpu].oob_enabled = 1;
		ret = add_core_to_monitor(pcpu);
			RTE_LOG(INFO, CHANNEL_MONITOR,
				"Monitoring pcpu %d OOB for %s\n",
				pcpu, pol->pkt.vm_name);
			RTE_LOG(ERR, CHANNEL_MONITOR,
				"Error monitoring pcpu %d OOB for %s\n",
				pcpu, pol->pkt.vm_name);

		pol->core_share[count].pcpu = pcpu;
		RTE_LOG(INFO, CHANNEL_MONITOR,
			"Monitoring pcpu %d for %s\n",
			pcpu, pol->pkt.vm_name);
/*
 * get_pcpu_to_control - resolve every vcpu listed in the policy packet
 * to a physical cpu and register each via pcpu_monitor(). Virtual
 * cores are translated through the VM's pcpu_map; physical cores are
 * used verbatim.
 */
get_pcpu_to_control(struct policy *pol)

	/* Convert vcpu to pcpu. */
	struct core_info *ci;

	ci = get_core_info();

	RTE_LOG(DEBUG, CHANNEL_MONITOR,
			"Looking for pcpu for %s\n", pol->pkt.vm_name);

	/*
	 * So now that we're handling virtual and physical cores, we need to
	 * differenciate between them when adding them to the branch monitor.
	 * Virtual cores need to be converted to physical cores.
	 */
	if (pol->pkt.core_type == CORE_TYPE_VIRTUAL) {
		/*
		 * If the cores in the policy are virtual, we need to map them
		 * to physical core. We look up the vm info and use that for
		 * the mapping.
		 */
		get_info_vm(pol->pkt.vm_name, &info);
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			pcpu = info.pcpu_map[pol->pkt.vcpu_to_control[count]];
			pcpu_monitor(pol, ci, pcpu, count);
	/*
	 * If the cores in the policy are physical, we just use
	 * those core id's directly.
	 */
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			pcpu = pol->pkt.vcpu_to_control[count];
			pcpu_monitor(pol, ci, pcpu, count);
433 get_pfid(struct policy *pol)
438 for (i = 0; i < pol->pkt.nb_mac_to_monitor; i++) {
440 RTE_ETH_FOREACH_DEV(x) {
441 #ifdef RTE_LIBRTE_I40E_PMD
442 ret = rte_pmd_i40e_query_vfid_by_mac(x,
443 (struct rte_ether_addr *)&(pol->pkt.vfid[i]));
447 if (ret != -EINVAL) {
452 if (ret == -EINVAL || ret == -ENOTSUP || ret == ENODEV) {
453 RTE_LOG(INFO, CHANNEL_MONITOR,
454 "Error with Policy. MAC not found on "
/*
 * update_policy - install or refresh the policy carried in 'pkt'.
 * First pass: a policy with a matching vm_name is overwritten in
 * place. Second pass: otherwise the first disabled slot is claimed.
 * TRAFFIC policies must also resolve a VF id (get_pfid) before being
 * enabled.
 * NOTE(review): failure branches after get_pfid() and the loop braces
 * are elided in this excerpt; 'updated' is presumably set on the first
 * pass to skip the second - confirm upstream.
 */
update_policy(struct channel_packet *pkt)

	unsigned int updated = 0;

	RTE_LOG(INFO, CHANNEL_MONITOR,
			"Applying policy for %s\n", pkt->vm_name);

	for (i = 0; i < RTE_DIM(policies); i++) {
		if (strcmp(policies[i].pkt.vm_name, pkt->vm_name) == 0) {
			/* Copy the contents of *pkt into the policy.pkt */
			policies[i].pkt = *pkt;
			get_pcpu_to_control(&policies[i]);
			/* Check Eth dev only for Traffic policy */
			if (policies[i].pkt.policy_to_use == TRAFFIC) {
				if (get_pfid(&policies[i]) < 0) {
			core_share_status(i);
			policies[i].enabled = 1;

	/* No existing policy matched: take the first free slot. */
	for (i = 0; i < RTE_DIM(policies); i++) {
		if (policies[i].enabled == 0) {
			policies[i].pkt = *pkt;
			get_pcpu_to_control(&policies[i]);
			/* Check Eth dev only for Traffic policy */
			if (policies[i].pkt.policy_to_use == TRAFFIC) {
				if (get_pfid(&policies[i]) < 0) {
			core_share_status(i);
			policies[i].enabled = 1;
514 remove_policy(struct channel_packet *pkt __rte_unused)
519 * Disabling the policy is simply a case of setting
522 for (i = 0; i < RTE_DIM(policies); i++) {
523 if (strcmp(policies[i].pkt.vm_name, pkt->vm_name) == 0) {
524 policies[i].enabled = 0;
/*
 * get_pkt_diff - estimate the policy's current packet rate
 * (packets/second) by summing per-VF rx counters across all monitored
 * MACs and scaling the delta since the previous call by the elapsed
 * TSC interval.
 * NOTE(review): rdtsc_prev is indexed with pol->pfid[x-1], i.e. the
 * last VF iterated - a single shared timestamp per policy; confirm
 * nb_mac_to_monitor >= 1 is guaranteed by the caller.
 */
get_pkt_diff(struct policy *pol)

	uint64_t vsi_pkt_count,
	vsi_pkt_count_prev_total = 0;
	double rdtsc_curr, rdtsc_diff, diff;

#ifdef RTE_LIBRTE_I40E_PMD
	struct rte_eth_stats vf_stats;

	for (x = 0; x < pol->pkt.nb_mac_to_monitor; x++) {

#ifdef RTE_LIBRTE_I40E_PMD
		/* On success, use the VF's received-packet counter. */
		if (rte_pmd_i40e_get_vf_stats(x, pol->pfid[x], &vf_stats) == 0)
			vsi_pkt_count = vf_stats.ipackets;

		vsi_pkt_total += vsi_pkt_count;

		/* Remember the previous totals keyed by VF id. */
		vsi_pkt_count_prev_total += vsi_pkt_count_prev[pol->pfid[x]];
		vsi_pkt_count_prev[pol->pfid[x]] = vsi_pkt_count;

	rdtsc_curr = rte_rdtsc_precise();
	rdtsc_diff = rdtsc_curr - rdtsc_prev[pol->pfid[x-1]];
	rdtsc_prev[pol->pfid[x-1]] = rdtsc_curr;

	/* packets per second = delta-packets * (tsc_hz / delta-tsc). */
	diff = (vsi_pkt_total - vsi_pkt_count_prev_total) *
			((double)rte_get_tsc_hz() / rdtsc_diff);
/*
 * apply_traffic_profile - scale the policy's cores based on the
 * measured packet rate: >= max threshold -> max frequency,
 * >= avg threshold -> medium, otherwise minimum. Cores flagged as
 * shared with another VM (status == 1) are skipped so a co-tenant is
 * never down-clocked.
 */
apply_traffic_profile(struct policy *pol)

	diff = get_pkt_diff(pol);

	if (diff >= (pol->pkt.traffic_policy.max_max_packet_thresh)) {
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			if (pol->core_share[count].status != 1)
				power_manager_scale_core_max(
						pol->core_share[count].pcpu);
	} else if (diff >= (pol->pkt.traffic_policy.avg_max_packet_thresh)) {
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			if (pol->core_share[count].status != 1)
				power_manager_scale_core_med(
						pol->core_share[count].pcpu);
	} else if (diff < (pol->pkt.traffic_policy.avg_max_packet_thresh)) {
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			if (pol->core_share[count].status != 1)
				power_manager_scale_core_min(
						pol->core_share[count].pcpu);
/*
 * apply_time_profile - scale cores by wall-clock hour: busy hours run
 * at max, quiet hours at min, and designated hours fall back to the
 * traffic profile. Shared cores (status == 1) are skipped.
 * NOTE(review): localtime() is not thread-safe; acceptable only if a
 * single monitor thread calls this - confirm.
 */
apply_time_profile(struct policy *pol)

	char time_string[40];

	/* Obtain the time of day, and convert it to a tm struct. */
	gettimeofday(&tv, NULL);
	ptm = localtime(&tv.tv_sec);
	/* Format the date and time, down to a single second. */
	strftime(time_string, sizeof(time_string), "%Y-%m-%d %H:%M:%S", ptm);

	for (x = 0; x < HOURS; x++) {

		if (ptm->tm_hour == pol->pkt.timer_policy.busy_hours[x]) {
			for (count = 0; count < pol->pkt.num_vcpu; count++) {
				if (pol->core_share[count].status != 1) {
					power_manager_scale_core_max(
						pol->core_share[count].pcpu);
		} else if (ptm->tm_hour ==
				pol->pkt.timer_policy.quiet_hours[x]) {
			for (count = 0; count < pol->pkt.num_vcpu; count++) {
				if (pol->core_share[count].status != 1) {
					power_manager_scale_core_min(
						pol->core_share[count].pcpu);
		} else if (ptm->tm_hour ==
				pol->pkt.timer_policy.hours_to_use_traffic_profile[x]) {
			apply_traffic_profile(pol);
/*
 * apply_workload_profile - scale the policy's cores by the static
 * workload hint from the packet: HIGH -> max frequency, MEDIUM ->
 * medium, LOW -> minimum. Shared cores (status == 1) are skipped.
 */
apply_workload_profile(struct policy *pol)

	if (pol->pkt.workload == HIGH) {
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			if (pol->core_share[count].status != 1)
				power_manager_scale_core_max(
						pol->core_share[count].pcpu);
	} else if (pol->pkt.workload == MEDIUM) {
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			if (pol->core_share[count].status != 1)
				power_manager_scale_core_med(
						pol->core_share[count].pcpu);
	} else if (pol->pkt.workload == LOW) {
		for (count = 0; count < pol->pkt.num_vcpu; count++) {
			if (pol->core_share[count].status != 1)
				power_manager_scale_core_min(
						pol->core_share[count].pcpu);
/*
 * apply_policy - dispatch to the profile handler matching the packet's
 * policy type. BRANCH_RATIO has no branch here; it appears to be
 * driven by the out-of-band monitor instead (see oob_monitor.h) -
 * NOTE(review): confirm.
 */
apply_policy(struct policy *pol)

	struct channel_packet *pkt = &pol->pkt;

	/*Check policy to use*/
	if (pkt->policy_to_use == TRAFFIC)
		apply_traffic_profile(pol);
	else if (pkt->policy_to_use == TIME)
		apply_time_profile(pol);
	else if (pkt->policy_to_use == WORKLOAD)
		apply_workload_profile(pol);
/*
 * write_binary_packet - write 'buffer_len' bytes to the channel's fd,
 * looping on partial writes and advancing the buffer pointer by the
 * bytes actually written each iteration.
 * NOTE(review): the 'buffer_len' parameter line, return statements and
 * the write()-error test are elided in this excerpt.
 */
write_binary_packet(void *buffer,
		struct channel_info *chan_info)

	if (buffer_len == 0 || buffer == NULL)

	if (chan_info->fd < 0) {
		RTE_LOG(ERR, CHANNEL_MONITOR, "Channel is not connected\n");

	while (buffer_len > 0) {
		ret = write(chan_info->fd, buffer, buffer_len);
		/* Error path for a failed write() (condition elided). */
			RTE_LOG(ERR, CHANNEL_MONITOR, "Write function failed due to %s.\n",

		buffer = (char *)buffer + ret;
/*
 * send_freq - reply to a frequency query over the channel. When
 * 'freq_list' is set, the current frequency of every vcpu's pcpu is
 * reported; otherwise only the single vcpu named by pkt->resource_id.
 * NOTE(review): the 'freq_list' parameter line, branch structure and
 * closing argument of write_binary_packet() are elided here.
 */
send_freq(struct channel_packet *pkt,
		struct channel_info *chan_info,

	unsigned int vcore_id = pkt->resource_id;
	struct channel_packet_freq_list channel_pkt_freq_list;

	if (get_info_vm(pkt->vm_name, &info) != 0)

	/* A single-vcpu query must name a valid vcpu. */
	if (!freq_list && vcore_id >= MAX_VCPU_PER_VM)

	channel_pkt_freq_list.command = CPU_POWER_FREQ_LIST;
	channel_pkt_freq_list.num_vcpu = info.num_vcpus;

	/* Full list: one entry per vcpu, mapped through pcpu_map. */
	for (i = 0; i < info.num_vcpus; i++)
		channel_pkt_freq_list.freq_list[i] =
			power_manager_get_current_frequency(info.pcpu_map[i]);
	/* Single query: fill just the requested vcpu's slot. */
	channel_pkt_freq_list.freq_list[vcore_id] =
		power_manager_get_current_frequency(info.pcpu_map[vcore_id]);

	return write_binary_packet(&channel_pkt_freq_list,
			sizeof(channel_pkt_freq_list),
/*
 * send_ack_for_received_cmd - echo the request packet back over the
 * channel with its command field overwritten by 'command' (the ack or
 * nack code chosen by the caller).
 * NOTE(review): the third parameter line ('command') and the final
 * chan_info argument of write_binary_packet() are elided here.
 */
send_ack_for_received_cmd(struct channel_packet *pkt,
		struct channel_info *chan_info,

	pkt->command = command;
	return write_binary_packet(pkt,
			sizeof(struct channel_packet),
/*
 * process_request - execute one decoded channel packet: power-scaling
 * commands, policy create/remove, and frequency queries. The channel
 * is claimed by atomically moving its status CONNECTED -> PROCESSING;
 * it is moved back at the end. If the compare-and-set fails (channel
 * busy or disabled) the request is dropped.
 * NOTE(review): this excerpt is elided - returns, switch/default
 * lines, several braces and some RTE_LOG arguments are missing.
 */
process_request(struct channel_packet *pkt, struct channel_info *chan_info)

	if (chan_info == NULL)

	/* Claim the channel; bail out if another thread owns it. */
	if (rte_atomic32_cmpset(&(chan_info->status), CHANNEL_MGR_CHANNEL_CONNECTED,
			CHANNEL_MGR_CHANNEL_PROCESSING) == 0)

	if (pkt->command == CPU_POWER) {
		unsigned int core_num;

		/* Virtual resource ids must be mapped to a physical cpu. */
		if (pkt->core_type == CORE_TYPE_VIRTUAL)
			core_num = get_pcpu(chan_info, pkt->resource_id);
			core_num = pkt->resource_id;

		RTE_LOG(DEBUG, CHANNEL_MONITOR, "Processing requested cmd for cpu:%d\n",

		bool valid_unit = true;

		/* Dispatch on the scaling unit (switch header elided). */
		case(CPU_POWER_SCALE_MIN):
			scale_res = power_manager_scale_core_min(core_num);
		case(CPU_POWER_SCALE_MAX):
			scale_res = power_manager_scale_core_max(core_num);
		case(CPU_POWER_SCALE_DOWN):
			scale_res = power_manager_scale_core_down(core_num);
		case(CPU_POWER_SCALE_UP):
			scale_res = power_manager_scale_core_up(core_num);
		case(CPU_POWER_ENABLE_TURBO):
			scale_res = power_manager_enable_turbo_core(core_num);
		case(CPU_POWER_DISABLE_TURBO):
			scale_res = power_manager_disable_turbo_core(core_num);

			/* Ack/nack the command based on scale_res. */
			ret = send_ack_for_received_cmd(pkt,
			RTE_LOG(ERR, CHANNEL_MONITOR, "Error during sending ack command.\n");
			RTE_LOG(ERR, CHANNEL_MONITOR, "Unexpected unit type.\n");

	if (pkt->command == PKT_POLICY) {
		RTE_LOG(INFO, CHANNEL_MONITOR, "Processing policy request %s\n",
		int ret = send_ack_for_received_cmd(pkt,
			RTE_LOG(ERR, CHANNEL_MONITOR, "Error during sending ack command.\n");

	if (pkt->command == PKT_POLICY_REMOVE) {
		ret = remove_policy(pkt);
			RTE_LOG(INFO, CHANNEL_MONITOR,
				"Removed policy %s\n", pkt->vm_name);
			RTE_LOG(INFO, CHANNEL_MONITOR,
				"Policy %s does not exist\n", pkt->vm_name);

	if (pkt->command == CPU_POWER_QUERY_FREQ_LIST ||
			pkt->command == CPU_POWER_QUERY_FREQ) {

		RTE_LOG(INFO, CHANNEL_MONITOR,
			"Frequency for %s requested.\n", pkt->vm_name);
		int ret = send_freq(pkt,
				pkt->command == CPU_POWER_QUERY_FREQ_LIST);
			RTE_LOG(ERR, CHANNEL_MONITOR, "Error during frequency sending.\n");

	/*
	 * Return is not checked as channel status may have been set to DISABLED
	 * from management thread
	 */
	rte_atomic32_cmpset(&(chan_info->status), CHANNEL_MGR_CHANNEL_PROCESSING,
			CHANNEL_MGR_CHANNEL_CONNECTED);
/*
 * add_channel_to_monitor - register the channel's fd with the global
 * epoll instance for EPOLLIN, storing the channel_info pointer in the
 * event payload so the monitor loop can recover it.
 */
add_channel_to_monitor(struct channel_info **chan_info)

	struct channel_info *info = *chan_info;
	struct epoll_event event;

	event.events = EPOLLIN;
	event.data.ptr = info;
	if (epoll_ctl(global_event_fd, EPOLL_CTL_ADD, info->fd, &event) < 0) {
		RTE_LOG(ERR, CHANNEL_MONITOR, "Unable to add channel '%s' "
				"to epoll\n", info->channel_path);
	/* NOTE(review): success message logged at ERR level; INFO looks intended. */
	RTE_LOG(ERR, CHANNEL_MONITOR, "Added channel '%s' "
			"to monitor\n", info->channel_path);
/*
 * remove_channel_from_monitor - detach the channel's fd from the
 * global epoll instance; logs (but otherwise ignores) failure.
 */
remove_channel_from_monitor(struct channel_info *chan_info)

	if (epoll_ctl(global_event_fd, EPOLL_CTL_DEL,
			chan_info->fd, NULL) < 0) {
		RTE_LOG(ERR, CHANNEL_MONITOR, "Unable to remove channel '%s' "
				"from epoll\n", chan_info->channel_path);
898 channel_monitor_init(void)
900 global_event_fd = epoll_create1(0);
901 if (global_event_fd == 0) {
902 RTE_LOG(ERR, CHANNEL_MONITOR,
903 "Error creating epoll context with error %s\n",
907 global_events_list = rte_malloc("epoll_events",
908 sizeof(*global_events_list)
909 * MAX_EVENTS, RTE_CACHE_LINE_SIZE);
910 if (global_events_list == NULL) {
911 RTE_LOG(ERR, CHANNEL_MONITOR, "Unable to rte_malloc for "
/*
 * read_binary_packet - read exactly one channel_packet from the
 * channel fd, looping on short reads, then hand it to
 * process_request(). On a read error the channel is removed.
 * NOTE(review): excerpt is elided - the read() buffer argument line,
 * error condition, and loop braces are missing here.
 */
read_binary_packet(struct channel_info *chan_info)

	struct channel_packet pkt;
	int buffer_len = sizeof(pkt);
	int n_bytes, err = 0;

	while (buffer_len > 0) {
		n_bytes = read(chan_info->fd,
		/* Whole remaining packet arrived in one read: done. */
		if (n_bytes == buffer_len)
			RTE_LOG(DEBUG, CHANNEL_MONITOR,
				"channel '%s' read: %s\n",
				chan_info->channel_path,
			remove_channel(&chan_info);
		/* Partial read: advance and keep reading. */
		buffer = (char *)buffer + n_bytes;
		buffer_len -= n_bytes;

	process_request(&pkt, chan_info);
/*
 * read_json_packet - read one JSON object (byte-at-a-time, tracking
 * brace depth from the opening '{' to its matching '}') from the
 * channel fd, parse it with Jansson, convert it to a channel_packet
 * via parse_json_to_pkt(), and dispatch with process_request(). For
 * FIFO channels the core id is recovered from the fifo name embedded
 * in vm_name.
 * NOTE(review): excerpt is elided - declarations, 'indent' updates,
 * several braces and error returns are missing here.
 */
read_json_packet(struct channel_info *chan_info)

	struct channel_packet pkt;
	const char *resource_name;

		/* read opening brace to closing brace */
			n_bytes = read(chan_info->fd, &json_data[idx], 1);
			if (json_data[idx] == '{')
			if (json_data[idx] == '}')
			/* Only count bytes once the object has started. */
			if ((indent > 0) || (idx > 0))
			/* Guard against overflowing the fixed buffer. */
			if (idx >= MAX_JSON_STRING_LEN-1)
		} while (indent > 0);

		json_data[idx] = '\0';

		if (strlen(json_data) == 0)

		printf("got [%s]\n", json_data);

		root = json_loads(json_data, 0, &error);
			resource_name = get_resource_name_from_chn_path(
					chan_info->channel_path);
			/*
			 * Because our data is now in the json
			 * object, we can overwrite the pkt
			 * with a channel_packet struct, using
			 * parse_json_to_pkt()
			 */
			ret = parse_json_to_pkt(root, &pkt, resource_name);
				RTE_LOG(ERR, CHANNEL_MONITOR,
					"Error validating JSON profile data\n");
			start = strstr(pkt.vm_name,
					CHANNEL_MGR_FIFO_PATTERN_NAME);
			if (start != NULL) {
				/* move past pattern to start of fifo id */
				start += strlen(CHANNEL_MGR_FIFO_PATTERN_NAME);

				n = (uint32_t)strtoul(start, &end, 10);

				if (end[0] == '\0') {
					/* Add core id to core list */
					pkt.vcpu_to_control[0] = n;
					process_request(&pkt, chan_info);
					RTE_LOG(ERR, CHANNEL_MONITOR,
						"Cannot extract core id from fifo name\n");
				process_request(&pkt, chan_info);
			RTE_LOG(ERR, CHANNEL_MONITOR,
				"JSON error on line %d: %s\n",
				error.line, error.text);
	} while (n_bytes > 0);
1037 run_channel_monitor(void)
1042 n_events = epoll_wait(global_event_fd, global_events_list,
1046 for (i = 0; i < n_events; i++) {
1047 struct channel_info *chan_info = (struct channel_info *)
1048 global_events_list[i].data.ptr;
1049 if ((global_events_list[i].events & EPOLLERR) ||
1050 (global_events_list[i].events & EPOLLHUP)) {
1051 RTE_LOG(INFO, CHANNEL_MONITOR,
1052 "Remote closed connection for "
1054 chan_info->channel_path);
1055 remove_channel(&chan_info);
1058 if (global_events_list[i].events & EPOLLIN) {
1060 switch (chan_info->type) {
1061 case CHANNEL_TYPE_BINARY:
1062 read_binary_packet(chan_info);
1065 case CHANNEL_TYPE_JSON:
1066 read_json_packet(chan_info);
1074 rte_delay_us(time_period_ms*1000);
1075 if (policy_is_set) {
1078 for (j = 0; j < RTE_DIM(policies); j++) {
1079 if (policies[j].enabled == 1)
1080 apply_policy(&policies[j]);