1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
13 #include <sys/types.h>
14 #include <sys/epoll.h>
15 #include <sys/queue.h>
17 #include <sys/socket.h>
18 #include <sys/select.h>
22 #pragma message "Jansson dev libs unavailable, not including JSON parsing"
25 #include <rte_memory.h>
26 #include <rte_malloc.h>
27 #include <rte_atomic.h>
28 #include <rte_cycles.h>
29 #include <rte_ethdev.h>
30 #include <rte_pmd_i40e.h>
32 #include <libvirt/libvirt.h>
33 #include "channel_monitor.h"
34 #include "channel_commands.h"
35 #include "channel_manager.h"
36 #include "power_manager.h"
37 #include "oob_monitor.h"
/* Log type alias used by all RTE_LOG() calls in this file. */
39 #define RTE_LOGTYPE_CHANNEL_MONITOR RTE_LOGTYPE_USER1
/* Max epoll events fetched per epoll_wait() call. */
41 #define MAX_EVENTS 256
/* Per-VF packet-count / TSC snapshots used by get_pkt_diff().
 * NOTE(review): 384 looks like a max-VF-id bound — confirm against
 * the i40e VF id range used elsewhere in the project. */
43 uint64_t vsi_pkt_count_prev[384];
44 uint64_t rdtsc_prev[384];
45 #define MAX_JSON_STRING_LEN 1024
/* Scratch buffer for one JSON request (see read_json_packet()). */
46 char json_data[MAX_JSON_STRING_LEN];
/* Polling period of the monitor loop, in milliseconds. */
48 double time_period_ms = 1;
/* Cleared to stop the monitor loop; volatile as it is set from
 * another thread (not atomic — simple stop flag only). */
49 static volatile unsigned run_loop = 1;
/* epoll instance monitoring all channel fds. */
50 static int global_event_fd;
51 static unsigned int policy_is_set;
/* Event array allocated in channel_monitor_init(). */
52 static struct epoll_event *global_events_list;
/* One slot per client; a slot is live when .enabled == 1. */
53 static struct policy policies[MAX_CLIENTS];
58 struct ether_addr addr;
63 str_to_ether_addr(const char *a, struct ether_addr *ether_addr)
67 unsigned long o[ETHER_ADDR_LEN];
72 o[i] = strtoul(a, &end, 16);
73 if (errno != 0 || end == a || (end[0] != ':' && end[0] != 0))
76 } while (++i != RTE_DIM(o) / sizeof(o[0]) && end[0] != 0);
78 /* Junk at the end of line */
82 /* Support the format XX:XX:XX:XX:XX:XX */
83 if (i == ETHER_ADDR_LEN) {
87 ether_addr->addr_bytes[i] = (uint8_t)o[i];
89 /* Support the format XXXX:XXXX:XXXX */
90 } else if (i == ETHER_ADDR_LEN / 2) {
92 if (o[i] > UINT16_MAX)
94 ether_addr->addr_bytes[i * 2] =
96 ether_addr->addr_bytes[i * 2 + 1] =
97 (uint8_t)(o[i] & 0xff);
/*
 * Parse "mac" and store it as the VF id at index "idx" of pkt->vfid[],
 * aliasing the 6-byte MAC through the PFID union (declared outside this
 * view). Logs an error and bails on malformed input.
 */
107 set_policy_mac(struct channel_packet *pkt, int idx, char *mac)
112 /* Use port MAC address as the vfid */
113 ret = str_to_ether_addr(mac, &pfid.addr);
/* Parse failure: the JSON carried a bad MAC string. */
116 RTE_LOG(ERR, CHANNEL_MONITOR,
117 "Invalid mac address received in JSON\n");
/* Echo the parsed address for operator visibility. */
122 printf("Received MAC Address: %02" PRIx8 ":%02" PRIx8 ":%02" PRIx8 ":"
123 "%02" PRIx8 ":%02" PRIx8 ":%02" PRIx8 "\n",
124 pfid.addr.addr_bytes[0], pfid.addr.addr_bytes[1],
125 pfid.addr.addr_bytes[2], pfid.addr.addr_bytes[3],
126 pfid.addr.addr_bytes[4], pfid.addr.addr_bytes[5]);
/* Store the 64-bit alias of the MAC; resolved to a real VF id later
 * by get_pfid(). */
128 pkt->vfid[idx] = pfid.pfid;
/*
 * Translate a Jansson JSON object into a channel_packet. Recurses into
 * the "policy"/"instruction" wrapper objects, then fills pkt fields from
 * the known keys. Unknown keys are logged. Defaults (TIME policy,
 * PKT_POLICY command, physical cores) are set up front so a sparse JSON
 * document still yields a usable packet.
 */
134 parse_json_to_pkt(json_t *element, struct channel_packet *pkt)
/* Start from a zeroed packet, then apply defaults. */
140 memset(pkt, 0, sizeof(struct channel_packet))
142 pkt->nb_mac_to_monitor = 0;
143 pkt->t_boost_status.tbEnabled = false;
145 pkt->policy_to_use = TIME;
146 pkt->command = PKT_POLICY;
147 pkt->core_type = CORE_TYPE_PHYSICAL;
149 json_object_foreach(element, key, value) {
150 if (!strcmp(key, "policy")) {
151 /* Recurse in to get the contents of profile */
152 ret = parse_json_to_pkt(value, pkt);
155 } else if (!strcmp(key, "instruction")) {
156 /* Recurse in to get the contents of instruction */
157 ret = parse_json_to_pkt(value, pkt);
160 } else if (!strcmp(key, "name")) {
/* NOTE(review): unbounded strcpy from external JSON into
 * pkt->vm_name — relies on the input fitting the field;
 * consider a bounded copy. TODO confirm field size. */
161 strcpy(pkt->vm_name, json_string_value(value));
162 } else if (!strcmp(key, "command")) {
164 snprintf(command, 32, "%s", json_string_value(value));
165 if (!strcmp(command, "power")) {
166 pkt->command = CPU_POWER;
167 } else if (!strcmp(command, "create")) {
168 pkt->command = PKT_POLICY;
169 } else if (!strcmp(command, "destroy")) {
170 pkt->command = PKT_POLICY_REMOVE;
172 RTE_LOG(ERR, CHANNEL_MONITOR,
173 "Invalid command received in JSON\n");
176 } else if (!strcmp(key, "policy_type")) {
178 snprintf(command, 32, "%s", json_string_value(value));
179 if (!strcmp(command, "TIME")) {
180 pkt->policy_to_use = TIME;
181 } else if (!strcmp(command, "TRAFFIC")) {
182 pkt->policy_to_use = TRAFFIC;
183 } else if (!strcmp(command, "WORKLOAD")) {
184 pkt->policy_to_use = WORKLOAD;
185 } else if (!strcmp(command, "BRANCH_RATIO")) {
186 pkt->policy_to_use = BRANCH_RATIO;
188 RTE_LOG(ERR, CHANNEL_MONITOR,
189 "Wrong policy_type received in JSON\n");
192 } else if (!strcmp(key, "workload")) {
194 snprintf(command, 32, "%s", json_string_value(value));
195 if (!strcmp(command, "HIGH")) {
196 pkt->workload = HIGH;
197 } else if (!strcmp(command, "MEDIUM")) {
198 pkt->workload = MEDIUM;
199 } else if (!strcmp(command, "LOW")) {
202 RTE_LOG(ERR, CHANNEL_MONITOR,
203 "Wrong workload received in JSON\n");
/* Hour arrays: copy each JSON integer element verbatim.
 * NOTE(review): no bound check of size against the
 * timer_policy array capacity visible here — confirm. */
206 } else if (!strcmp(key, "busy_hours")) {
208 size_t size = json_array_size(value);
210 for (i = 0; i < size; i++) {
211 int hour = (int)json_integer_value(
212 json_array_get(value, i));
213 pkt->timer_policy.busy_hours[i] = hour;
215 } else if (!strcmp(key, "quiet_hours")) {
217 size_t size = json_array_size(value);
219 for (i = 0; i < size; i++) {
220 int hour = (int)json_integer_value(
221 json_array_get(value, i));
222 pkt->timer_policy.quiet_hours[i] = hour;
224 } else if (!strcmp(key, "core_list")) {
226 size_t size = json_array_size(value);
228 for (i = 0; i < size; i++) {
229 int core = (int)json_integer_value(
230 json_array_get(value, i));
231 pkt->vcpu_to_control[i] = core;
233 pkt->num_vcpu = size;
234 } else if (!strcmp(key, "mac_list")) {
236 size_t size = json_array_size(value);
238 for (i = 0; i < size; i++) {
240 snprintf(mac, 32, "%s", json_string_value(
241 json_array_get(value, i)));
242 set_policy_mac(pkt, i, mac);
244 pkt->nb_mac_to_monitor = size;
245 } else if (!strcmp(key, "avg_packet_thresh")) {
246 pkt->traffic_policy.avg_max_packet_thresh =
247 (uint32_t)json_integer_value(value);
248 } else if (!strcmp(key, "max_packet_thresh")) {
249 pkt->traffic_policy.max_max_packet_thresh =
250 (uint32_t)json_integer_value(value);
251 } else if (!strcmp(key, "unit")) {
253 snprintf(unit, 32, "%s", json_string_value(value));
254 if (!strcmp(unit, "SCALE_UP")) {
255 pkt->unit = CPU_POWER_SCALE_UP;
256 } else if (!strcmp(unit, "SCALE_DOWN")) {
257 pkt->unit = CPU_POWER_SCALE_DOWN;
258 } else if (!strcmp(unit, "SCALE_MAX")) {
259 pkt->unit = CPU_POWER_SCALE_MAX;
260 } else if (!strcmp(unit, "SCALE_MIN")) {
261 pkt->unit = CPU_POWER_SCALE_MIN;
262 } else if (!strcmp(unit, "ENABLE_TURBO")) {
263 pkt->unit = CPU_POWER_ENABLE_TURBO;
264 } else if (!strcmp(unit, "DISABLE_TURBO")) {
265 pkt->unit = CPU_POWER_DISABLE_TURBO;
267 RTE_LOG(ERR, CHANNEL_MONITOR,
268 "Invalid command received in JSON\n");
271 } else if (!strcmp(key, "resource_id")) {
272 pkt->resource_id = (uint32_t)json_integer_value(value);
/* Fall-through: key not recognised — log and continue. */
274 RTE_LOG(ERR, CHANNEL_MONITOR,
275 "Unknown key received in JSON string: %s\n",
/*
 * Tear down the channel monitor: (lines elided in this listing) and
 * release the epoll event array allocated in channel_monitor_init().
 */
283 void channel_monitor_exit(void)
286 rte_free(global_events_list);
/*
 * Mark vcpu slot "z" of policy "pNo" as shared if its pcpu is also used
 * by vcpu "t" of VM "x" (a different VM than the policy's own), and pin
 * that pcpu to max frequency so the sharing VM is not throttled.
 */
290 core_share(int pNo, int z, int x, int t)
292 	if (policies[pNo].core_share[z].pcpu == lvm_info[x].pcpus[t]) {
/* Only a *different* VM using the same pcpu counts as sharing. */
293 		if (strcmp(policies[pNo].pkt.vm_name,
294 				lvm_info[x].vm_name) != 0) {
295 			policies[pNo].core_share[z].status = 1;
296 			power_manager_scale_core_max(
297 				policies[pNo].core_share[z].pcpu);
/*
 * Recompute the shared/exclusive status of every pcpu controlled by
 * policy "pNo" by cross-checking against every vcpu of every VM known
 * to libvirt (lvm_info).
 */
303 core_share_status(int pNo)
306 	int noVms = 0, noVcpus = 0, z, x, t;
308 	get_all_vm(&noVms, &noVcpus);
310 	/* Reset Core Share Status. */
311 	for (z = 0; z < noVcpus; z++)
312 		policies[pNo].core_share[z].status = 0;
314 	/* Foreach vcpu in a policy. */
315 	for (z = 0; z < policies[pNo].pkt.num_vcpu; z++) {
316 		/* Foreach VM on the platform. */
317 		for (x = 0; x < noVms; x++) {
318 			/* Foreach vcpu of VMs on platform. */
319 			for (t = 0; t < lvm_info[x].num_cpus; t++)
320 				core_share(pNo, z, x, t);
/*
 * Register physical core "pcpu" for monitoring under policy "pol".
 * For BRANCH_RATIO policies the core is handed to the out-of-band
 * monitor; otherwise it is simply recorded in the policy's core_share
 * table at slot "count" for in-band frequency scaling.
 */
327 pcpu_monitor(struct policy *pol, struct core_info *ci, int pcpu, int count)
331 	if (pol->pkt.policy_to_use == BRANCH_RATIO) {
/* Flag the core for OOB sampling before registering it. */
332 		ci->cd[pcpu].oob_enabled = 1;
333 		ret = add_core_to_monitor(pcpu);
334 		if (ret == 0)
335 			RTE_LOG(INFO, CHANNEL_MONITOR,
336 					"Monitoring pcpu %d OOB for %s\n",
337 					pcpu, pol->pkt.vm_name);
/* add_core_to_monitor() failed — log but do not abort. */
339 			RTE_LOG(ERR, CHANNEL_MONITOR,
340 					"Error monitoring pcpu %d OOB for %s\n",
341 					pcpu, pol->pkt.vm_name);
/* Non-OOB policies: record the pcpu for later scaling decisions. */
344 		pol->core_share[count].pcpu = pcpu;
345 		RTE_LOG(INFO, CHANNEL_MONITOR,
346 				"Monitoring pcpu %d for %s\n",
347 				pcpu, pol->pkt.vm_name);
/*
 * Resolve the policy's vcpu list to physical cores and register each
 * with pcpu_monitor(). Virtual core ids are translated through the VM's
 * pcpu affinity masks; physical core ids are used directly.
 */
353 get_pcpu_to_control(struct policy *pol)
356 	/* Convert vcpu to pcpu. */
360 	struct core_info *ci;
362 	ci = get_core_info();
364 	RTE_LOG(INFO, CHANNEL_MONITOR,
365 			"Looking for pcpu for %s\n", pol->pkt.vm_name);
368 	 * So now that we're handling virtual and physical cores, we need to
369 	 * differenciate between them when adding them to the branch monitor.
370 	 * Virtual cores need to be converted to physical cores.
372 	if (pol->pkt.core_type == CORE_TYPE_VIRTUAL) {
374 		 * If the cores in the policy are virtual, we need to map them
375 		 * to physical core. We look up the vm info and use that for
378 		get_info_vm(pol->pkt.vm_name, &info);
379 		for (count = 0; count < pol->pkt.num_vcpu; count++) {
381 				info.pcpu_mask[pol->pkt.vcpu_to_control[count]];
/* Walk every set bit in the affinity mask, clearing bits as we
 * go; each set bit is a pcpu the vcpu may run on. */
382 			for (pcpu = 0; mask_u64b;
383 					mask_u64b &= ~(1ULL << pcpu++)) {
384 				if ((mask_u64b >> pcpu) & 1)
385 					pcpu_monitor(pol, ci, pcpu, count);
390 		 * If the cores in the policy are physical, we just use
391 		 * those core id's directly.
393 		for (count = 0; count < pol->pkt.num_vcpu; count++) {
394 			pcpu = pol->pkt.vcpu_to_control[count];
395 			pcpu_monitor(pol, ci, pcpu, count);
401 get_pfid(struct policy *pol)
406 for (i = 0; i < pol->pkt.nb_mac_to_monitor; i++) {
408 RTE_ETH_FOREACH_DEV(x) {
409 ret = rte_pmd_i40e_query_vfid_by_mac(x,
410 (struct ether_addr *)&(pol->pkt.vfid[i]));
411 if (ret != -EINVAL) {
416 if (ret == -EINVAL || ret == -ENOTSUP || ret == ENODEV) {
417 RTE_LOG(INFO, CHANNEL_MONITOR,
418 "Error with Policy. MAC not found on "
/*
 * Install or refresh a policy for the VM named in *pkt. First pass:
 * overwrite an existing policy with the same vm_name. Second pass (only
 * if no match was updated): claim the first disabled slot.
 */
429 update_policy(struct channel_packet *pkt)
432 	unsigned int updated = 0;
436 	RTE_LOG(INFO, CHANNEL_MONITOR,
437 			"Applying policy for %s\n", pkt->vm_name);
439 	for (i = 0; i < MAX_CLIENTS; i++) {
440 		if (strcmp(policies[i].pkt.vm_name, pkt->vm_name) == 0) {
441 			/* Copy the contents of *pkt into the policy.pkt */
442 			policies[i].pkt = *pkt;
443 			get_pcpu_to_control(&policies[i]);
/* NOTE(review): get_pfid() returns 1 on success and a negative
 * errno on failure, so "== -1" only catches -EPERM-style
 * failures — confirm whether "< 0" was intended. */
444 			if (get_pfid(&policies[i]) == -1) {
448 			core_share_status(i);
449 			policies[i].enabled = 1;
/* No existing policy matched: take the first free (disabled) slot. */
454 	for (i = 0; i < MAX_CLIENTS; i++) {
455 		if (policies[i].enabled == 0) {
456 			policies[i].pkt = *pkt;
457 			get_pcpu_to_control(&policies[i]);
458 			if (get_pfid(&policies[i]) == -1)
460 			core_share_status(i);
461 			policies[i].enabled = 1;
470 remove_policy(struct channel_packet *pkt __rte_unused)
475 * Disabling the policy is simply a case of setting
478 for (i = 0; i < MAX_CLIENTS; i++) {
479 if (strcmp(policies[i].pkt.vm_name, pkt->vm_name) == 0) {
480 policies[i].enabled = 0;
/*
 * Estimate the aggregate packets-per-second rate across all VFs the
 * policy monitors: sum current vs. previous ipackets counters, then
 * scale the delta by (tsc_hz / tsc_delta) to get a per-second rate.
 */
488 get_pkt_diff(struct policy *pol)
491 	uint64_t vsi_pkt_count,
493 		vsi_pkt_count_prev_total = 0;
494 	double rdtsc_curr, rdtsc_diff, diff;
496 	struct rte_eth_stats vf_stats;
498 	for (x = 0; x < pol->pkt.nb_mac_to_monitor; x++) {
/* NOTE(review): port id passed here is the loop index x, not a
 * stored port for this VF — assumes port ordering matches the
 * MAC list; confirm against get_pfid(). */
501 		if (rte_pmd_i40e_get_vf_stats(x, pol->pfid[x], &vf_stats) == 0)
502 			vsi_pkt_count = vf_stats.ipackets;
506 		vsi_pkt_total += vsi_pkt_count;
/* Roll the per-VF previous counters forward. */
508 		vsi_pkt_count_prev_total += vsi_pkt_count_prev[pol->pfid[x]];
509 		vsi_pkt_count_prev[pol->pfid[x]] = vsi_pkt_count;
512 	rdtsc_curr = rte_rdtsc_precise();
/* NOTE(review): indexes rdtsc_prev[] with the *last* VF id
 * (x-1 after the loop) — breaks if nb_mac_to_monitor == 0;
 * presumably one shared timestamp per policy was intended. */
513 	rdtsc_diff = rdtsc_curr - rdtsc_prev[pol->pfid[x-1]];
514 	rdtsc_prev[pol->pfid[x-1]] = rdtsc_curr;
516 	diff = (vsi_pkt_total - vsi_pkt_count_prev_total) *
517 		((double)rte_get_tsc_hz() / rdtsc_diff);
/*
 * TRAFFIC policy: pick a frequency tier from the measured packet rate.
 * >= max threshold -> max freq; >= avg threshold -> medium; else min.
 * Cores flagged as shared (status == 1) are skipped — they are pinned
 * to max by core_share().
 */
523 apply_traffic_profile(struct policy *pol)
529 	diff = get_pkt_diff(pol);
531 	RTE_LOG(INFO, CHANNEL_MONITOR, "Applying traffic profile\n");
533 	if (diff >= (pol->pkt.traffic_policy.max_max_packet_thresh)) {
534 		for (count = 0; count < pol->pkt.num_vcpu; count++) {
535 			if (pol->core_share[count].status != 1)
536 				power_manager_scale_core_max(
537 						pol->core_share[count].pcpu);
539 	} else if (diff >= (pol->pkt.traffic_policy.avg_max_packet_thresh)) {
540 		for (count = 0; count < pol->pkt.num_vcpu; count++) {
541 			if (pol->core_share[count].status != 1)
542 				power_manager_scale_core_med(
543 						pol->core_share[count].pcpu);
545 	} else if (diff < (pol->pkt.traffic_policy.avg_max_packet_thresh)) {
546 		for (count = 0; count < pol->pkt.num_vcpu; count++) {
547 			if (pol->core_share[count].status != 1)
548 				power_manager_scale_core_min(
549 						pol->core_share[count].pcpu);
/*
 * TIME policy: compare the current wall-clock hour against the policy's
 * busy/quiet/traffic hour tables. Busy hours scale non-shared cores to
 * max, quiet hours to min; hours marked for traffic fall through to the
 * traffic profile.
 */
555 apply_time_profile(struct policy *pol)
561 	char time_string[40];
563 	/* Obtain the time of day, and convert it to a tm struct. */
564 	gettimeofday(&tv, NULL);
565 	ptm = localtime(&tv.tv_sec);
566 	/* Format the date and time, down to a single second. */
567 	strftime(time_string, sizeof(time_string), "%Y-%m-%d %H:%M:%S", ptm);
/* Scan every configured hour slot for a match on the current hour. */
569 	for (x = 0; x < HOURS; x++) {
571 		if (ptm->tm_hour == pol->pkt.timer_policy.busy_hours[x]) {
572 			for (count = 0; count < pol->pkt.num_vcpu; count++) {
573 				if (pol->core_share[count].status != 1) {
574 					power_manager_scale_core_max(
575 						pol->core_share[count].pcpu);
576 				RTE_LOG(INFO, CHANNEL_MONITOR,
577 					"Scaling up core %d to max\n",
578 					pol->core_share[count].pcpu);
582 		} else if (ptm->tm_hour ==
583 				pol->pkt.timer_policy.quiet_hours[x]) {
584 			for (count = 0; count < pol->pkt.num_vcpu; count++) {
585 				if (pol->core_share[count].status != 1) {
586 					power_manager_scale_core_min(
587 						pol->core_share[count].pcpu);
588 				RTE_LOG(INFO, CHANNEL_MONITOR,
589 					"Scaling down core %d to min\n",
590 					pol->core_share[count].pcpu);
594 		} else if (ptm->tm_hour ==
595 			pol->pkt.timer_policy.hours_to_use_traffic_profile[x]) {
/* Delegate this hour to rate-based scaling. */
596 			apply_traffic_profile(pol);
/*
 * WORKLOAD policy: map the declared workload level directly to a
 * frequency tier (HIGH -> max, MEDIUM -> med, LOW -> min) for every
 * non-shared core in the policy.
 */
603 apply_workload_profile(struct policy *pol)
608 	if (pol->pkt.workload == HIGH) {
609 		for (count = 0; count < pol->pkt.num_vcpu; count++) {
610 			if (pol->core_share[count].status != 1)
611 				power_manager_scale_core_max(
612 						pol->core_share[count].pcpu);
614 	} else if (pol->pkt.workload == MEDIUM) {
615 		for (count = 0; count < pol->pkt.num_vcpu; count++) {
616 			if (pol->core_share[count].status != 1)
617 				power_manager_scale_core_med(
618 						pol->core_share[count].pcpu);
620 	} else if (pol->pkt.workload == LOW) {
621 		for (count = 0; count < pol->pkt.num_vcpu; count++) {
622 			if (pol->core_share[count].status != 1)
623 				power_manager_scale_core_min(
624 						pol->core_share[count].pcpu);
630 apply_policy(struct policy *pol)
633 struct channel_packet *pkt = &pol->pkt;
635 /*Check policy to use*/
636 if (pkt->policy_to_use == TRAFFIC)
637 apply_traffic_profile(pol);
638 else if (pkt->policy_to_use == TIME)
639 apply_time_profile(pol);
640 else if (pkt->policy_to_use == WORKLOAD)
641 apply_workload_profile(pol);
/*
 * Execute one decoded request on a channel. Atomically moves the
 * channel CONNECTED -> PROCESSING (dropping the request if another
 * thread owns it), performs the command — a direct CPU_POWER frequency
 * action on a core or mask, a PKT_POLICY install, or a
 * PKT_POLICY_REMOVE — then moves it back to CONNECTED.
 */
645 process_request(struct channel_packet *pkt, struct channel_info *chan_info)
649 	if (chan_info == NULL)
652 	RTE_LOG(INFO, CHANNEL_MONITOR, "Processing Request %s\n", pkt->vm_name);
/* Claim the channel; bail if it is not currently CONNECTED. */
654 	if (rte_atomic32_cmpset(&(chan_info->status), CHANNEL_MGR_CHANNEL_CONNECTED,
655 		CHANNEL_MGR_CHANNEL_PROCESSING) == 0)
658 	if (pkt->command == CPU_POWER) {
659 		core_mask = get_pcpus_mask(chan_info, pkt->resource_id);
660 		if (core_mask == 0) {
662 			 * Core mask will be 0 in the case where
663 			 * hypervisor is not available so we're working in
664 			 * the host, so use the core as the mask.
666 			core_mask = 1ULL << pkt->resource_id;
/* Single-core mask: use the per-core scaling API. */
668 		if (__builtin_popcountll(core_mask) == 1) {
670 			unsigned core_num = __builtin_ffsll(core_mask) - 1;
673 			case(CPU_POWER_SCALE_MIN):
674 				power_manager_scale_core_min(core_num);
676 			case(CPU_POWER_SCALE_MAX):
677 				power_manager_scale_core_max(core_num);
679 			case(CPU_POWER_SCALE_DOWN):
680 				power_manager_scale_core_down(core_num);
682 			case(CPU_POWER_SCALE_UP):
683 				power_manager_scale_core_up(core_num);
685 			case(CPU_POWER_ENABLE_TURBO):
686 				power_manager_enable_turbo_core(core_num);
688 			case(CPU_POWER_DISABLE_TURBO):
689 				power_manager_disable_turbo_core(core_num);
/* Multi-core mask: use the mask-based scaling API. */
696 			case(CPU_POWER_SCALE_MIN):
697 				power_manager_scale_mask_min(core_mask);
699 			case(CPU_POWER_SCALE_MAX):
700 				power_manager_scale_mask_max(core_mask);
702 			case(CPU_POWER_SCALE_DOWN):
703 				power_manager_scale_mask_down(core_mask);
705 			case(CPU_POWER_SCALE_UP):
706 				power_manager_scale_mask_up(core_mask);
708 			case(CPU_POWER_ENABLE_TURBO):
709 				power_manager_enable_turbo_mask(core_mask);
711 			case(CPU_POWER_DISABLE_TURBO):
712 				power_manager_disable_turbo_mask(core_mask);
721 	if (pkt->command == PKT_POLICY) {
722 		RTE_LOG(INFO, CHANNEL_MONITOR,
723 				"\nProcessing Policy request\n");
728 	if (pkt->command == PKT_POLICY_REMOVE) {
729 		RTE_LOG(INFO, CHANNEL_MONITOR,
730 				 "Removing policy %s\n", pkt->vm_name);
735 	 * Return is not checked as channel status may have been set to DISABLED
736 	 * from management thread
738 	rte_atomic32_cmpset(&(chan_info->status), CHANNEL_MGR_CHANNEL_PROCESSING,
739 			CHANNEL_MGR_CHANNEL_CONNECTED);
745 add_channel_to_monitor(struct channel_info **chan_info)
747 struct channel_info *info = *chan_info;
748 struct epoll_event event;
750 event.events = EPOLLIN;
751 event.data.ptr = info;
752 if (epoll_ctl(global_event_fd, EPOLL_CTL_ADD, info->fd, &event) < 0) {
753 RTE_LOG(ERR, CHANNEL_MONITOR, "Unable to add channel '%s' "
754 "to epoll\n", info->channel_path);
757 RTE_LOG(ERR, CHANNEL_MONITOR, "Added channel '%s' "
758 "to monitor\n", info->channel_path);
/*
 * Deregister a channel's fd from the global epoll instance. Logs on
 * failure; the fd itself is closed elsewhere (by channel_manager).
 */
763 remove_channel_from_monitor(struct channel_info *chan_info)
765 	if (epoll_ctl(global_event_fd, EPOLL_CTL_DEL,
766 			chan_info->fd, NULL) < 0) {
767 		RTE_LOG(ERR, CHANNEL_MONITOR, "Unable to remove channel '%s' "
768 				"from epoll\n", chan_info->channel_path);
775 channel_monitor_init(void)
777 global_event_fd = epoll_create1(0);
778 if (global_event_fd == 0) {
779 RTE_LOG(ERR, CHANNEL_MONITOR,
780 "Error creating epoll context with error %s\n",
784 global_events_list = rte_malloc("epoll_events",
785 sizeof(*global_events_list)
786 * MAX_EVENTS, RTE_CACHE_LINE_SIZE);
787 if (global_events_list == NULL) {
788 RTE_LOG(ERR, CHANNEL_MONITOR, "Unable to rte_malloc for "
/*
 * Read exactly one binary channel_packet from the channel fd, looping
 * until the full struct has been received, then hand it to
 * process_request(). On a read error the channel is torn down.
 */
796 read_binary_packet(struct channel_info *chan_info)
798 	struct channel_packet pkt;
800 	int buffer_len = sizeof(pkt);
801 	int n_bytes, err = 0;
/* Keep reading until the whole struct has arrived (short reads
 * advance the cursor below). */
803 	while (buffer_len > 0) {
804 		n_bytes = read(chan_info->fd,
/* Full packet in one read: done. */
806 		if (n_bytes == buffer_len)
810 			RTE_LOG(DEBUG, CHANNEL_MONITOR,
812 					"channel '%s' read: %s\n",
813 					chan_info->channel_path,
/* Fatal read error: drop the channel entirely. */
815 			remove_channel(&chan_info);
/* Short read: advance the cursor and keep going. */
818 		buffer = (char *)buffer + n_bytes;
819 		buffer_len -= n_bytes;
822 		process_request(&pkt, chan_info);
/*
 * Read one JSON document from the channel fd byte-by-byte, tracking
 * brace depth until the matching closing brace, parse it with Jansson,
 * convert it to a channel_packet via parse_json_to_pkt(), and execute
 * it. Repeats while data keeps arriving.
 */
827 read_json_packet(struct channel_info *chan_info)
829 	struct channel_packet pkt;
834 		/* read opening brace to closing brace */
839 			n_bytes = read(chan_info->fd, &json_data[idx], 1);
/* Track nesting so we stop exactly at the outer '}'. */
842 			if (json_data[idx] == '{')
844 			if (json_data[idx] == '}')
/* Only accumulate once the document has started. */
846 			if ((indent > 0) || (idx > 0))
/* Guard against overflowing the fixed json_data buffer. */
850 			if (idx >= MAX_JSON_STRING_LEN-1)
852 		} while (indent > 0);
856 		 * We've broken out of the read loop without getting
857 		 * a closing brace, so throw away the data
861 		if (strlen(json_data) == 0)
864 		printf("got [%s]\n", json_data);
866 		root = json_loads(json_data, 0, &error);
870 			 * Because our data is now in the json
871 			 * object, we can overwrite the pkt
872 			 * with a channel_packet struct, using
873 			 * parse_json_to_pkt()
875 			ret = parse_json_to_pkt(root, &pkt);
878 				RTE_LOG(ERR, CHANNEL_MONITOR,
879 					"Error validating JSON profile data\n");
882 			process_request(&pkt, chan_info);
/* json_loads() failed: report where the document broke. */
884 			RTE_LOG(ERR, CHANNEL_MONITOR,
885 					"JSON error on line %d: %s\n",
886 					error.line, error.text);
888 	} while (n_bytes > 0);
893 run_channel_monitor(void)
898 n_events = epoll_wait(global_event_fd, global_events_list,
902 for (i = 0; i < n_events; i++) {
903 struct channel_info *chan_info = (struct channel_info *)
904 global_events_list[i].data.ptr;
905 if ((global_events_list[i].events & EPOLLERR) ||
906 (global_events_list[i].events & EPOLLHUP)) {
907 RTE_LOG(DEBUG, CHANNEL_MONITOR, "Remote closed connection for "
909 chan_info->channel_path);
910 remove_channel(&chan_info);
913 if (global_events_list[i].events & EPOLLIN) {
915 switch (chan_info->type) {
916 case CHANNEL_TYPE_BINARY:
917 read_binary_packet(chan_info);
920 case CHANNEL_TYPE_JSON:
921 read_json_packet(chan_info);
929 rte_delay_us(time_period_ms*1000);
933 for (j = 0; j < MAX_CLIENTS; j++) {
934 if (policies[j].enabled == 1)
935 apply_policy(&policies[j]);