return found;
}
+/*
+ * Collect one rte_power_monitor_cond per queue on this lcore's monitor list.
+ *
+ * Walks cfg->head and queries each ethdev Rx queue for its monitor condition
+ * via rte_eth_get_monitor_addr(), filling @pmc in list order.
+ *
+ * @param cfg  per-lcore configuration holding the monitored-queue list
+ * @param pmc  output array of monitor conditions, capacity @len entries
+ * @param len  capacity of @pmc (caller sizes it to cfg->n_queues)
+ * @return 0 on success, -1 if the list holds more queues than @len,
+ *         or the negative error from rte_eth_get_monitor_addr().
+ */
+static inline int
+get_monitor_addresses(struct pmd_core_cfg *cfg,
+ struct rte_power_monitor_cond *pmc, size_t len)
+{
+ const struct queue_list_entry *qle;
+ size_t i = 0;
+ int ret;
+
+ TAILQ_FOREACH(qle, &cfg->head, next) {
+ const union queue *q = &qle->queue;
+ struct rte_power_monitor_cond *cur;
+
+ /* attempted out of bounds access */
+ if (i >= len) {
+ RTE_LOG(ERR, POWER, "Too many queues being monitored\n");
+ return -1;
+ }
+
+ /* ask the driver for this queue's monitor address/condition */
+ cur = &pmc[i++];
+ ret = rte_eth_get_monitor_addr(q->portid, q->qid, cur);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
static void
calc_tsc(void)
{
return true;
}
+/*
+ * Rx callback for multi-queue monitor mode (rte_power_monitor_multi).
+ *
+ * On an empty poll, marks this queue as sleep-capable; once every queue on
+ * the lcore agrees the lcore may sleep, gathers all queues' monitor
+ * conditions and arms a multi-address monitor sleep. On non-empty polls the
+ * queue's sleep state is reset. Always returns @nb_rx unchanged.
+ */
+static uint16_t
+clb_multiwait(uint16_t port_id __rte_unused, uint16_t qidx __rte_unused,
+ struct rte_mbuf **pkts __rte_unused, uint16_t nb_rx,
+ uint16_t max_pkts __rte_unused, void *arg)
+{
+ const unsigned int lcore = rte_lcore_id();
+ struct queue_list_entry *queue_conf = arg;
+ struct pmd_core_cfg *lcore_conf;
+ const bool empty = nb_rx == 0;
+
+ lcore_conf = &lcore_cfgs[lcore];
+
+ /* early exit */
+ if (likely(!empty))
+ /* queue saw traffic - reset its sleep state */
+ queue_reset(lcore_conf, queue_conf);
+ else {
+ struct rte_power_monitor_cond pmc[lcore_conf->n_queues];
+ int ret;
+
+ /* can this queue sleep? */
+ if (!queue_can_sleep(lcore_conf, queue_conf))
+ return nb_rx;
+
+ /* can this lcore sleep? */
+ if (!lcore_can_sleep(lcore_conf))
+ return nb_rx;
+
+ /* gather all monitoring conditions */
+ ret = get_monitor_addresses(lcore_conf, pmc,
+ lcore_conf->n_queues);
+ if (ret < 0)
+ return nb_rx;
+
+ /* sleep until any monitored queue's condition triggers */
+ rte_power_monitor_multi(pmc, lcore_conf->n_queues, UINT64_MAX);
+ }
+
+ return nb_rx;
+}
+
static uint16_t
clb_umwait(uint16_t port_id, uint16_t qidx, struct rte_mbuf **pkts __rte_unused,
uint16_t nb_rx, uint16_t max_pkts __rte_unused, void *arg)
{
struct rte_eth_rxq_info qinfo;
- if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) < 0)
- return -1;
+ int ret = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
+ if (ret < 0) {
+ if (ret == -ENOTSUP)
+ return 1;
+ else
+ return -1;
+ }
return qinfo.queue_state == RTE_ETH_QUEUE_STATE_STOPPED;
}
check_monitor(struct pmd_core_cfg *cfg, const union queue *qdata)
{
struct rte_power_monitor_cond dummy;
+ bool multimonitor_supported;
/* check if rte_power_monitor is supported */
if (!global_data.intrinsics_support.power_monitor) {
RTE_LOG(DEBUG, POWER, "Monitoring intrinsics are not supported\n");
return -ENOTSUP;
}
+ /* check if multi-monitor is supported */
+ multimonitor_supported =
+ global_data.intrinsics_support.power_monitor_multi;
- if (cfg->n_queues > 0) {
+ /* if we're adding a new queue, do we support multiple queues? */
+ if (cfg->n_queues > 0 && !multimonitor_supported) {
RTE_LOG(DEBUG, POWER, "Monitoring multiple queues is not supported\n");
return -ENOTSUP;
}
return 0;
}
+/*
+ * Pick the monitor-mode Rx callback: the multi-queue variant when the CPU
+ * supports monitoring multiple addresses, the single-queue umwait one
+ * otherwise.
+ */
+static inline rte_rx_callback_fn
+get_monitor_callback(void)
+{
+ return global_data.intrinsics_support.power_monitor_multi ?
+ clb_multiwait : clb_umwait;
+}
+
int
rte_power_ethdev_pmgmt_queue_enable(unsigned int lcore_id, uint16_t port_id,
uint16_t queue_id, enum rte_power_pmd_mgmt_type mode)
if (ret < 0)
goto end;
- clb = clb_umwait;
+ clb = get_monitor_callback();
break;
case RTE_POWER_MGMT_TYPE_SCALE:
+ clb = clb_scale_freq;
+
+ /* we only have to check this when enabling first queue */
+ if (lcore_cfg->pwr_mgmt_state != PMD_MGMT_DISABLED)
+ break;
/* check if we can add a new queue */
ret = check_scale(lcore_id);
if (ret < 0)
goto end;
- clb = clb_scale_freq;
break;
case RTE_POWER_MGMT_TYPE_PAUSE:
/* figure out various time-to-tsc conversions */
rte_eth_remove_rx_callback(port_id, queue_id, queue_cfg->cb);
break;
case RTE_POWER_MGMT_TYPE_SCALE:
- rte_power_freq_max(lcore_id);
rte_eth_remove_rx_callback(port_id, queue_id, queue_cfg->cb);
- rte_power_exit(lcore_id);
+ /* disable power library on this lcore if this was last queue */
+ if (lcore_cfg->pwr_mgmt_state == PMD_MGMT_DISABLED) {
+ rte_power_freq_max(lcore_id);
+ rte_power_exit(lcore_id);
+ }
break;
}
/*