+static void
+core_share(int pNo, int z, int x, int t)
+{
+ if (policies[pNo].core_share[z].pcpu == lvm_info[x].pcpus[t]) {
+ if (strcmp(policies[pNo].pkt.vm_name,
+ lvm_info[x].vm_name) != 0) {
+ policies[pNo].core_share[z].status = 1;
+ power_manager_scale_core_max(
+ policies[pNo].core_share[z].pcpu);
+ }
+ }
+}
+
+static void
+core_share_status(int pNo)
+{
+
+ int noVms = 0, noVcpus = 0, z, x, t;
+
+ get_all_vm(&noVms, &noVcpus);
+
+ /* Reset Core Share Status. */
+ for (z = 0; z < noVcpus; z++)
+ policies[pNo].core_share[z].status = 0;
+
+ /* Foreach vcpu in a policy. */
+ for (z = 0; z < policies[pNo].pkt.num_vcpu; z++) {
+ /* Foreach VM on the platform. */
+ for (x = 0; x < noVms; x++) {
+ /* Foreach vcpu of VMs on platform. */
+ for (t = 0; t < lvm_info[x].num_cpus; t++)
+ core_share(pNo, z, x, t);
+ }
+ }
+}
+
+
+static int
+pcpu_monitor(struct policy *pol, struct core_info *ci, int pcpu, int count)
+{
+ int ret = 0;
+
+ if (pol->pkt.policy_to_use == RTE_POWER_POLICY_BRANCH_RATIO) {
+ ci->cd[pcpu].oob_enabled = 1;
+ ret = add_core_to_monitor(pcpu);
+ if (ret == 0)
+ RTE_LOG(INFO, CHANNEL_MONITOR,
+ "Monitoring pcpu %d OOB for %s\n",
+ pcpu, pol->pkt.vm_name);
+ else
+ RTE_LOG(ERR, CHANNEL_MONITOR,
+ "Error monitoring pcpu %d OOB for %s\n",
+ pcpu, pol->pkt.vm_name);
+
+ } else {
+ pol->core_share[count].pcpu = pcpu;
+ RTE_LOG(INFO, CHANNEL_MONITOR,
+ "Monitoring pcpu %d for %s\n",
+ pcpu, pol->pkt.vm_name);
+ }
+ return ret;
+}
+
+static void
+get_pcpu_to_control(struct policy *pol)
+{
+
+ /* Convert vcpu to pcpu. */
+ struct vm_info info;
+ int pcpu, count;
+ struct core_info *ci;
+
+ ci = get_core_info();
+
+ RTE_LOG(DEBUG, CHANNEL_MONITOR,
+ "Looking for pcpu for %s\n", pol->pkt.vm_name);
+
+ /*
+ * So now that we're handling virtual and physical cores, we need to
+ * differenciate between them when adding them to the branch monitor.
+ * Virtual cores need to be converted to physical cores.
+ */
+ if (pol->pkt.core_type == RTE_POWER_CORE_TYPE_VIRTUAL) {
+ /*
+ * If the cores in the policy are virtual, we need to map them
+ * to physical core. We look up the vm info and use that for
+ * the mapping.
+ */
+ get_info_vm(pol->pkt.vm_name, &info);
+ for (count = 0; count < pol->pkt.num_vcpu; count++) {
+ pcpu = info.pcpu_map[pol->pkt.vcpu_to_control[count]];
+ pcpu_monitor(pol, ci, pcpu, count);
+ }
+ } else {
+ /*
+ * If the cores in the policy are physical, we just use
+ * those core id's directly.
+ */
+ for (count = 0; count < pol->pkt.num_vcpu; count++) {
+ pcpu = pol->pkt.vcpu_to_control[count];
+ pcpu_monitor(pol, ci, pcpu, count);
+ }
+ }
+}
+
+static int
+get_pfid(struct policy *pol)
+{
+
+ int i, x, ret = 0;
+
+ for (i = 0; i < pol->pkt.nb_mac_to_monitor; i++) {
+
+ RTE_ETH_FOREACH_DEV(x) {
+#ifdef RTE_NET_I40E
+ ret = rte_pmd_i40e_query_vfid_by_mac(x,
+ (struct rte_ether_addr *)&(pol->pkt.vfid[i]));
+#else
+ ret = -ENOTSUP;
+#endif
+ if (ret != -EINVAL) {
+ pol->port[i] = x;
+ break;
+ }
+ }
+ if (ret == -EINVAL || ret == -ENOTSUP || ret == ENODEV) {
+ RTE_LOG(INFO, CHANNEL_MONITOR,
+ "Error with Policy. MAC not found on "
+ "attached ports ");
+ pol->enabled = 0;
+ return ret;
+ }
+ pol->pfid[i] = ret;
+ }
+ return 1;
+}
+
+static int
+update_policy(struct rte_power_channel_packet *pkt)
+{
+
+ unsigned int updated = 0;
+ unsigned int i;
+
+
+ RTE_LOG(INFO, CHANNEL_MONITOR,
+ "Applying policy for %s\n", pkt->vm_name);
+
+ for (i = 0; i < RTE_DIM(policies); i++) {
+ if (strcmp(policies[i].pkt.vm_name, pkt->vm_name) == 0) {
+ /* Copy the contents of *pkt into the policy.pkt */
+ policies[i].pkt = *pkt;
+ get_pcpu_to_control(&policies[i]);
+ /* Check Eth dev only for Traffic policy */
+ if (policies[i].pkt.policy_to_use ==
+ RTE_POWER_POLICY_TRAFFIC) {
+ if (get_pfid(&policies[i]) < 0) {
+ updated = 1;
+ break;
+ }
+ }
+ core_share_status(i);
+ policies[i].enabled = 1;
+ updated = 1;
+ }
+ }
+ if (!updated) {
+ for (i = 0; i < RTE_DIM(policies); i++) {
+ if (policies[i].enabled == 0) {
+ policies[i].pkt = *pkt;
+ get_pcpu_to_control(&policies[i]);
+ /* Check Eth dev only for Traffic policy */
+ if (policies[i].pkt.policy_to_use ==
+ RTE_POWER_POLICY_TRAFFIC) {
+ if (get_pfid(&policies[i]) < 0) {
+ updated = 1;
+ break;
+ }
+ }
+ core_share_status(i);
+ policies[i].enabled = 1;
+ break;
+ }
+ }
+ }
+ return 0;
+}
+
+static int
+remove_policy(struct rte_power_channel_packet *pkt __rte_unused)
+{
+ unsigned int i;
+
+ /*
+ * Disabling the policy is simply a case of setting
+ * enabled to 0
+ */
+ for (i = 0; i < RTE_DIM(policies); i++) {
+ if (strcmp(policies[i].pkt.vm_name, pkt->vm_name) == 0) {
+ policies[i].enabled = 0;
+ return 0;
+ }
+ }
+ return -1;
+}
+
/*
 * Return the packet rate (received packets per second) observed since
 * the previous call, summed over all VF MACs monitored by this policy.
 * Per-VF previous counts live in the global vsi_pkt_count_prev[] and
 * the previous timestamp in rdtsc_prev[], both indexed by pfid.
 */
static uint64_t
get_pkt_diff(struct policy *pol)
{

	uint64_t vsi_pkt_count,
		vsi_pkt_total = 0,
		vsi_pkt_count_prev_total = 0;
	double rdtsc_curr, rdtsc_diff, diff;
	int x;
#ifdef RTE_NET_I40E
	struct rte_eth_stats vf_stats;
#endif

	/* Accumulate current and previous counts over every monitored MAC. */
	for (x = 0; x < pol->pkt.nb_mac_to_monitor; x++) {

#ifdef RTE_NET_I40E
		/*Read vsi stats*/
		if (rte_pmd_i40e_get_vf_stats(x, pol->pfid[x], &vf_stats) == 0)
			vsi_pkt_count = vf_stats.ipackets;
		else
			/* NOTE(review): -1 wraps to UINT64_MAX in uint64_t,
			 * so a failed stats read skews vsi_pkt_total. */
			vsi_pkt_count = -1;
#else
		vsi_pkt_count = -1;
#endif

		vsi_pkt_total += vsi_pkt_count;

		/* Roll the per-VF previous count forward for the next call. */
		vsi_pkt_count_prev_total += vsi_pkt_count_prev[pol->pfid[x]];
		vsi_pkt_count_prev[pol->pfid[x]] = vsi_pkt_count;
	}

	/*
	 * Timestamp bookkeeping is keyed off the last MAC's pfid.
	 * NOTE(review): if nb_mac_to_monitor is 0, x-1 is -1 and this
	 * indexes rdtsc_prev[] out of bounds — confirm callers guarantee
	 * at least one monitored MAC.
	 */
	rdtsc_curr = rte_rdtsc_precise();
	rdtsc_diff = rdtsc_curr - rdtsc_prev[pol->pfid[x-1]];
	rdtsc_prev[pol->pfid[x-1]] = rdtsc_curr;

	/* Scale the packet delta to a per-second rate via the TSC frequency. */
	diff = (vsi_pkt_total - vsi_pkt_count_prev_total) *
		((double)rte_get_tsc_hz() / rdtsc_diff);

	return diff;
}
+
+static void
+apply_traffic_profile(struct policy *pol)
+{
+
+ int count;
+ uint64_t diff = 0;
+
+ diff = get_pkt_diff(pol);
+
+ if (diff >= (pol->pkt.traffic_policy.max_max_packet_thresh)) {
+ for (count = 0; count < pol->pkt.num_vcpu; count++) {
+ if (pol->core_share[count].status != 1)
+ power_manager_scale_core_max(
+ pol->core_share[count].pcpu);
+ }
+ } else if (diff >= (pol->pkt.traffic_policy.avg_max_packet_thresh)) {
+ for (count = 0; count < pol->pkt.num_vcpu; count++) {
+ if (pol->core_share[count].status != 1)
+ power_manager_scale_core_med(
+ pol->core_share[count].pcpu);
+ }
+ } else if (diff < (pol->pkt.traffic_policy.avg_max_packet_thresh)) {
+ for (count = 0; count < pol->pkt.num_vcpu; count++) {
+ if (pol->core_share[count].status != 1)
+ power_manager_scale_core_min(
+ pol->core_share[count].pcpu);
+ }
+ }
+}
+
+static void
+apply_time_profile(struct policy *pol)
+{
+
+ int count, x;
+ struct timeval tv;
+ struct tm *ptm;
+ char time_string[40];
+
+ /* Obtain the time of day, and convert it to a tm struct. */
+ gettimeofday(&tv, NULL);
+ ptm = localtime(&tv.tv_sec);
+ /* Format the date and time, down to a single second. */
+ strftime(time_string, sizeof(time_string), "%Y-%m-%d %H:%M:%S", ptm);
+
+ for (x = 0; x < RTE_POWER_HOURS_PER_DAY; x++) {
+
+ if (ptm->tm_hour == pol->pkt.timer_policy.busy_hours[x]) {
+ for (count = 0; count < pol->pkt.num_vcpu; count++) {
+ if (pol->core_share[count].status != 1) {
+ power_manager_scale_core_max(
+ pol->core_share[count].pcpu);
+ }
+ }
+ break;
+ } else if (ptm->tm_hour ==
+ pol->pkt.timer_policy.quiet_hours[x]) {
+ for (count = 0; count < pol->pkt.num_vcpu; count++) {
+ if (pol->core_share[count].status != 1) {
+ power_manager_scale_core_min(
+ pol->core_share[count].pcpu);
+ }
+ }
+ break;
+ } else if (ptm->tm_hour ==
+ pol->pkt.timer_policy.hours_to_use_traffic_profile[x]) {
+ apply_traffic_profile(pol);
+ break;
+ }
+ }
+}
+
+static void
+apply_workload_profile(struct policy *pol)
+{
+
+ int count;
+
+ if (pol->pkt.workload == RTE_POWER_WL_HIGH) {
+ for (count = 0; count < pol->pkt.num_vcpu; count++) {
+ if (pol->core_share[count].status != 1)
+ power_manager_scale_core_max(
+ pol->core_share[count].pcpu);
+ }
+ } else if (pol->pkt.workload == RTE_POWER_WL_MEDIUM) {
+ for (count = 0; count < pol->pkt.num_vcpu; count++) {
+ if (pol->core_share[count].status != 1)
+ power_manager_scale_core_med(
+ pol->core_share[count].pcpu);
+ }
+ } else if (pol->pkt.workload == RTE_POWER_WL_LOW) {
+ for (count = 0; count < pol->pkt.num_vcpu; count++) {
+ if (pol->core_share[count].status != 1)
+ power_manager_scale_core_min(
+ pol->core_share[count].pcpu);
+ }
+ }
+}
+
+static void
+apply_policy(struct policy *pol)
+{
+
+ struct rte_power_channel_packet *pkt = &pol->pkt;
+
+ /*Check policy to use*/
+ if (pkt->policy_to_use == RTE_POWER_POLICY_TRAFFIC)
+ apply_traffic_profile(pol);
+ else if (pkt->policy_to_use == RTE_POWER_POLICY_TIME)
+ apply_time_profile(pol);
+ else if (pkt->policy_to_use == RTE_POWER_POLICY_WORKLOAD)
+ apply_workload_profile(pol);
+}
+
+static int
+write_binary_packet(void *buffer,
+ size_t buffer_len,
+ struct channel_info *chan_info)
+{
+ int ret;
+
+ if (buffer_len == 0 || buffer == NULL)
+ return -1;
+
+ if (chan_info->fd < 0) {
+ RTE_LOG(ERR, CHANNEL_MONITOR, "Channel is not connected\n");
+ return -1;
+ }
+
+ while (buffer_len > 0) {
+ ret = write(chan_info->fd, buffer, buffer_len);
+ if (ret == -1) {
+ if (errno == EINTR)
+ continue;
+ RTE_LOG(ERR, CHANNEL_MONITOR, "Write function failed due to %s.\n",
+ strerror(errno));
+ return -1;
+ }
+ buffer = (char *)buffer + ret;
+ buffer_len -= ret;
+ }
+ return 0;
+}
+
+static int
+send_freq(struct rte_power_channel_packet *pkt,
+ struct channel_info *chan_info,
+ bool freq_list)
+{
+ unsigned int vcore_id = pkt->resource_id;
+ struct rte_power_channel_packet_freq_list channel_pkt_freq_list;
+ struct vm_info info;
+
+ if (get_info_vm(pkt->vm_name, &info) != 0)
+ return -1;
+
+ if (!freq_list && vcore_id >= RTE_POWER_MAX_VCPU_PER_VM)
+ return -1;
+
+ if (!info.allow_query)
+ return -1;
+
+ channel_pkt_freq_list.command = RTE_POWER_FREQ_LIST;
+ channel_pkt_freq_list.num_vcpu = info.num_vcpus;
+
+ if (freq_list) {
+ unsigned int i;
+ for (i = 0; i < info.num_vcpus; i++)
+ channel_pkt_freq_list.freq_list[i] =
+ power_manager_get_current_frequency(info.pcpu_map[i]);
+ } else {
+ channel_pkt_freq_list.freq_list[vcore_id] =
+ power_manager_get_current_frequency(info.pcpu_map[vcore_id]);
+ }
+
+ return write_binary_packet(&channel_pkt_freq_list,
+ sizeof(channel_pkt_freq_list),
+ chan_info);
+}
+
+static int
+send_capabilities(struct rte_power_channel_packet *pkt,
+ struct channel_info *chan_info,
+ bool list_requested)
+{
+ unsigned int vcore_id = pkt->resource_id;
+ struct rte_power_channel_packet_caps_list channel_pkt_caps_list;
+ struct vm_info info;
+ struct rte_power_core_capabilities caps;
+ int ret;
+
+ if (get_info_vm(pkt->vm_name, &info) != 0)
+ return -1;
+
+ if (!list_requested && vcore_id >= RTE_POWER_MAX_VCPU_PER_VM)
+ return -1;
+
+ if (!info.allow_query)
+ return -1;
+
+ channel_pkt_caps_list.command = RTE_POWER_CAPS_LIST;
+ channel_pkt_caps_list.num_vcpu = info.num_vcpus;
+
+ if (list_requested) {
+ unsigned int i;
+ for (i = 0; i < info.num_vcpus; i++) {
+ ret = rte_power_get_capabilities(info.pcpu_map[i],
+ &caps);
+ if (ret == 0) {
+ channel_pkt_caps_list.turbo[i] =
+ caps.turbo;
+ channel_pkt_caps_list.priority[i] =
+ caps.priority;
+ } else
+ return -1;
+
+ }
+ } else {
+ ret = rte_power_get_capabilities(info.pcpu_map[vcore_id],
+ &caps);
+ if (ret == 0) {
+ channel_pkt_caps_list.turbo[vcore_id] =
+ caps.turbo;
+ channel_pkt_caps_list.priority[vcore_id] =
+ caps.priority;
+ } else
+ return -1;
+ }
+
+ return write_binary_packet(&channel_pkt_caps_list,
+ sizeof(channel_pkt_caps_list),
+ chan_info);
+}
+
+static int
+send_ack_for_received_cmd(struct rte_power_channel_packet *pkt,
+ struct channel_info *chan_info,
+ uint32_t command)
+{
+ pkt->command = command;
+ return write_binary_packet(pkt,
+ sizeof(*pkt),
+ chan_info);
+}
+