author    Dennis Kobert <dennis@kobert.dev>  2025-04-16 15:10:27 +0200
committer Dennis Kobert <dennis@kobert.dev>  2025-04-16 15:10:27 +0200
commit    28d72ba8cee6745bddbf9c07f189d20faea2d2b4 (patch)
tree      3b2cebafca8179c745be388de248758c79432da0
parent    3e905300138dcd05acc8d877ffe687c4caddc597 (diff)
Set up garbage core, increase energy polling frequency, remove logs
-rw-r--r--  src/energy.rs         10
-rw-r--r--  src/energy/budget.rs   4
-rw-r--r--  src/scheduler.rs      33
3 files changed, 27 insertions(+), 20 deletions(-)
diff --git a/src/energy.rs b/src/energy.rs
index 7440fe6..68a2a0e 100644
--- a/src/energy.rs
+++ b/src/energy.rs
@@ -165,7 +165,7 @@ impl EnergyService {
// Process any incoming requests
self.handle_requests();
- if i % 30 == 0 {
+ if i % 10 == 0 {
// Update energy measurements
self.update_measurements();
@@ -312,10 +312,10 @@ impl EnergyService {
self.bias = (self.bias * (alpha.recip() * current_bias + ((alpha - 1.) / alpha)))
.clamp(0.1, 5.);
self.system_energy += est_diff;
- println!(
- "Energy estimation: {:.1} rapl: {:.1}, est diff: {:.1} rapl diff: {:.1}, bias: {:.1}, power consumption: {:.1}",
- self.system_energy, rapl, est_diff, rapl_diff, self.bias, power_comsumption_watt,
- );
+ // println!(
+ // "Energy estimation: {:.1} rapl: {:.1}, est diff: {:.1} rapl diff: {:.1}, bias: {:.1}, power consumption: {:.1}",
+ // self.system_energy, rapl, est_diff, rapl_diff, self.bias, power_comsumption_watt,
+ // );
}
}
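
The cadence change above makes the service refresh its energy measurements on every 10th loop iteration instead of every 30th, i.e. three times as often. A minimal sketch of the pattern, assuming a fixed loop tick (the actual tick length is not visible in this diff):

    use std::time::Duration;

    // Sketch only: the 10 ms tick is an assumption, not taken from the repo.
    const TICK: Duration = Duration::from_millis(10);

    fn run_loop(mut handle_requests: impl FnMut(), mut update_measurements: impl FnMut()) {
        for i in 0u64.. {
            handle_requests();
            if i % 10 == 0 {
                // With a 10 ms tick, measurements now refresh every ~100 ms
                // instead of every ~300 ms under the old `i % 30` check.
                update_measurements();
            }
            std::thread::sleep(TICK);
        }
    }
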
diff --git a/src/energy/budget.rs b/src/energy/budget.rs
index cdb2b6d..e29554d 100644
--- a/src/energy/budget.rs
+++ b/src/energy/budget.rs
@@ -37,7 +37,7 @@ impl BudgetPolicy for SimpleCappingPolicy {
let actual_energy = energy_service.last_energy_diff;
let energy_cap = self.power_cap.load(std::sync::atomic::Ordering::Relaxed) as f64
* energy_service.last_time_between_measurements.as_secs_f64();
- println!("{actual_energy} {energy_cap}");
+ //println!("{actual_energy} {energy_cap}");
let base_energy_per_process =
energy_cap / process_energies.iter().filter(|(_, e)| **e > 0f64).count() as f64;
let ratio = energy_cap / actual_energy * self.last_ratio;
@@ -49,7 +49,7 @@ impl BudgetPolicy for SimpleCappingPolicy {
//.min((ratio * base_energy_per_process * MAX_BUDGET_FACTOR * 1000.) as u64);
.min(MAX_BUDGET);
if energy != 0.0 {
- println!("budget: {budget} energy: {energy} ratio: {ratio} base: {base_energy_per_process}");
+ //println!("budget: {budget} energy: {energy} ratio: {ratio} base: {base_energy_per_process}");
}
}
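
For orientation, the capping arithmetic visible in the context lines: the per-interval energy cap is the configured power cap multiplied by the time between measurements, split evenly across processes that actually drew energy, and the cap-to-actual ratio scales the next round of budgets. A worked sketch with made-up numbers; only the formulas are taken from the diff:

    // Illustrative numbers, formulas as in SimpleCappingPolicy above.
    fn main() {
        let power_cap_watts = 15.0_f64;        // configured power cap
        let interval_secs = 0.5_f64;           // time between measurements
        let energy_cap = power_cap_watts * interval_secs;    // 7.5 J per interval

        let active = 5.0_f64;                  // processes with energy > 0
        let base_energy_per_process = energy_cap / active;   // 1.5 J each

        let actual_energy = 10.0_f64;          // measured usage: over the cap
        let last_ratio = 1.0_f64;
        let ratio = energy_cap / actual_energy * last_ratio; // 0.75: budgets shrink
        println!("cap {energy_cap} J, base {base_energy_per_process} J, ratio {ratio}");
    }
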
diff --git a/src/scheduler.rs b/src/scheduler.rs
index f99d2ec..fa86ef2 100644
--- a/src/scheduler.rs
+++ b/src/scheduler.rs
@@ -30,6 +30,7 @@ pub struct Scheduler<'a> {
own_pid: Pid,
p_cores: Range<i32>,
e_cores: Option<Range<i32>>,
+ garbage_core: i32,
topology: Topology,
to_remove: Vec<Pid>,
e_core_selector: Box<dyn CoreSelector>,
@@ -138,6 +139,7 @@ impl<'a> Scheduler<'a> {
empty_task_infos,
tasks_scheduled: 0,
e_cores,
+ garbage_core: 0,
topology,
e_core_selector,
p_core_selector,
@@ -150,11 +152,17 @@ impl<'a> Scheduler<'a> {
})
}
- fn try_set_up_garbage_cpu(&self) -> Result<bool, TrySendError<FrequencyRequest>> {
+ fn try_set_up_garbage_cpu(&mut self) -> Result<bool, TrySendError<FrequencyRequest>> {
if let Some(e_cores) = &self.e_cores {
+ self.garbage_core = e_cores.end.saturating_sub(1);
+ self.frequency_sender
+ .try_send(FrequencyRequest::SetFrequencyRangeForCore(
+ self.garbage_core as u32,
+ 800_000..=1_200_000,
+ ))?;
self.frequency_sender
.try_send(FrequencyRequest::SetGovernorForCore(
- e_cores.end.saturating_sub(1) as u32,
+ self.garbage_core as u32,
Governor::Powersave,
))?;
Ok(true)
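
This hunk pins the garbage core's frequency before switching its governor. Linux cpufreq expresses frequencies in kHz, so 800_000..=1_200_000 caps the core between 0.8 and 1.2 GHz, assuming FrequencyRequest follows that convention. A hedged sketch of what servicing such a request could look like through the standard cpufreq sysfs interface (the real scheduler sends a FrequencyRequest over a channel instead; error handling trimmed):

    use std::fs;
    use std::io;

    // Illustrative only: apply a min/max frequency range (in kHz) to one core
    // via the standard cpufreq sysfs files.
    fn set_frequency_range(core: u32, range: std::ops::RangeInclusive<u32>) -> io::Result<()> {
        let base = format!("/sys/devices/system/cpu/cpu{core}/cpufreq");
        fs::write(format!("{base}/scaling_min_freq"), range.start().to_string())?;
        fs::write(format!("{base}/scaling_max_freq"), range.end().to_string())?;
        Ok(())
    }
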
@@ -164,7 +172,7 @@ impl<'a> Scheduler<'a> {
}
fn consume_all_tasks(&mut self) {
- while let Ok(Some(task)) = self.bpf.dequeue_task() {
+ while let Ok(Some(mut task)) = self.bpf.dequeue_task() {
// The scheduler itself has to be scheduled regardless of its energy usage
if task.pid == self.own_pid {
self.task_queue.push_front(task);
@@ -193,6 +201,10 @@ impl<'a> Scheduler<'a> {
// Get current budget for this task
match e.get().read_budget() {
0 => self.no_budget_task_queue.push_back(task),
+ x if x < 1000 => {
+ task.weight = 0;
+ self.task_queue.push_back(task)
+ }
_ => self.task_queue.push_back(task),
}
}
@@ -217,14 +229,9 @@ impl<'a> Scheduler<'a> {
dispatched_task.flags |= RL_CPU_ANY as u64;
}
- // if self
- // .e_cores
- // .as_ref()
- // .map(|e_cores| e_cores.contains(&dispatched_task.cpu))
- // .unwrap_or(false)
- // {
- // dispatched_task.cpu = self.p_core_selector.next_core(task.cpu);
- // }
+ if task.weight == 0 && self.p_cores.contains(&dispatched_task.cpu) {
+ dispatched_task.cpu = self.e_core_selector.next_core(task.cpu);
+ }
if task.pid == self.own_pid {
dispatched_task.slice_ns = SLICE_US * 1000;
@@ -261,8 +268,8 @@ impl<'a> Scheduler<'a> {
if let Some(task) = self.no_budget_task_queue.pop_front() {
let mut dispatched_task = DispatchedTask::new(&task);
- // Low budget tasks go to e-cores
- let cpu = self.e_core_selector.next_core(task.cpu);
+ // Low budget tasks go to garbage_core
+ let cpu = self.garbage_core;
if cpu >= 0 {
dispatched_task.cpu = cpu;
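
Taken together, the scheduler changes give dispatch three tiers: tasks with a healthy budget are dispatched normally, tasks with a budget below 1000 get weight = 0 and are steered from P-cores onto E-cores, and tasks with no budget at all are pinned to the single frequency-capped garbage core. A condensed sketch of that policy; the function and parameter names are stand-ins, not the scheduler's real interface:

    use std::ops::Range;

    // Condensed, illustrative routing mirroring the three tiers above.
    // The thresholds (0 and 1000) come from the diff; the rest is a stand-in.
    fn route_task(budget: u64, prev_cpu: i32, p_cores: &Range<i32>, garbage_core: i32,
                  next_e_core: impl FnOnce(i32) -> i32) -> i32 {
        if budget == 0 {
            garbage_core                  // exhausted: confine to the slow core
        } else if budget < 1000 && p_cores.contains(&prev_cpu) {
            next_e_core(prev_cpu)         // nearly exhausted: move off the P-cores
        } else {
            prev_cpu                      // normal dispatch: keep the previous CPU
        }
    }
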