diff options
Diffstat (limited to 'src/scheduler.rs')
-rw-r--r-- | src/scheduler.rs | 90 |
1 file changed, 56 insertions, 34 deletions
diff --git a/src/scheduler.rs b/src/scheduler.rs index bcc3134..f99d2ec 100644 --- a/src/scheduler.rs +++ b/src/scheduler.rs @@ -1,6 +1,6 @@ use crate::bpf::*; use crate::energy::{self, Request as EnergyRequest, TaskInfo}; -use crate::freq::{self, FrequencyKHZ, Request as FrequencyRequest}; +use crate::freq::{self, FrequencyKHZ, Governor, Request as FrequencyRequest}; use crate::core_selector::{CoreSelector, RoundRobinSelector}; use anyhow::Result; @@ -29,7 +29,7 @@ pub struct Scheduler<'a> { //TODO: also consider Pids of children own_pid: Pid, p_cores: Range<i32>, - e_cores: Range<i32>, + e_cores: Option<Range<i32>>, topology: Topology, to_remove: Vec<Pid>, e_core_selector: Box<dyn CoreSelector>, @@ -84,8 +84,23 @@ impl<'a> Scheduler<'a> { let p_cores = *p_core_ids.first().unwrap_or(&0)..(*p_core_ids.last().unwrap_or(&-1) + 1); let all_cores = 0..((e_cores.len() + p_cores.len()) as u32); + let e_cores = if !e_cores.is_empty() { + Some(e_cores) + } else { + None + }; + let p_core_selector = Box::new(RoundRobinSelector::new(&p_cores)); - let e_core_selector = Box::new(RoundRobinSelector::new(&e_cores)); + let e_core_selector = if let Some(e_cores) = &e_cores { + // reserve the last e core as garbage core + Box::new(RoundRobinSelector::new( + &(e_cores.start..e_cores.end.saturating_sub(2)), + )) + } else { + // fallback on systems without e cores + Box::new(RoundRobinSelector::new(&(0..1))) + }; + let to_remove = Vec::with_capacity(1000); let frequency_sender = freq::start_frequency_service( @@ -135,23 +150,17 @@ impl<'a> Scheduler<'a> { }) } - fn try_set_up_garbage_cpu(&self, cpu: u32) -> Result<bool, TrySendError<FrequencyRequest>> { - if self.shared_cpu_frequency_ranges.read().unwrap().len() <= cpu as usize { - // We wait until shared_cpu_frequency_ranges has been initialized - return Ok(false); + fn try_set_up_garbage_cpu(&self) -> Result<bool, TrySendError<FrequencyRequest>> { + if let Some(e_cores) = &self.e_cores { + self.frequency_sender + 
.try_send(FrequencyRequest::SetGovernorForCore( + e_cores.end.saturating_sub(1) as u32, + Governor::Powersave, + ))?; + Ok(true) + } else { + Ok(false) } - let target = self.shared_cpu_frequency_ranges.read().unwrap()[cpu as usize] - .clone() - .min() - .unwrap(); - self.frequency_sender - .try_send(FrequencyRequest::SetTargetFrequencyForCore(cpu, target))?; - self.frequency_sender - .try_send(FrequencyRequest::SetFrequencyRangeForCore( - cpu, - target..=(target), - ))?; - Ok(true) } fn consume_all_tasks(&mut self) { @@ -165,7 +174,11 @@ impl<'a> Scheduler<'a> { // Check if we've seen this task before match self.managed_tasks.entry(task.pid) { std::collections::hash_map::Entry::Vacant(e) => { - let is_e_core = self.e_cores.contains(&task.cpu); + let is_e_core = self + .e_cores + .as_ref() + .map(|e_cores| e_cores.contains(&task.cpu)) + .unwrap_or(false); // New task - register it with the energy service let task_info = self.empty_task_infos.recv().unwrap(); task_info.set_cpu(task.cpu); @@ -200,13 +213,18 @@ impl<'a> Scheduler<'a> { if cpu >= 0 { dispatched_task.cpu = cpu; } else { - dispatched_task.cpu = self.p_core_selector.next_core(task.cpu); - //dispatched_task.flags |= RL_CPU_ANY as u64; + //dispatched_task.cpu = self.p_core_selector.next_core(task.cpu); + dispatched_task.flags |= RL_CPU_ANY as u64; } - if self.e_cores.contains(&dispatched_task.cpu) { - dispatched_task.cpu = self.p_core_selector.next_core(task.cpu); - } + // if self + // .e_cores + // .as_ref() + // .map(|e_cores| e_cores.contains(&dispatched_task.cpu)) + // .unwrap_or(false) + // { + // dispatched_task.cpu = self.p_core_selector.next_core(task.cpu); + // } if task.pid == self.own_pid { dispatched_task.slice_ns = SLICE_US * 1000; @@ -219,7 +237,12 @@ impl<'a> Scheduler<'a> { panic!(); } - let running_on_e_core = self.e_cores.contains(&dispatched_task.cpu); + let running_on_e_core = self + .e_cores + .as_ref() + .map(|e_cores| e_cores.contains(&dispatched_task.cpu)) + .unwrap_or(false); + if 
let Some(entry) = self.managed_tasks.get_mut(&task.pid) { entry.set_cpu(dispatched_task.cpu); entry.set_running_on_e_core(running_on_e_core); @@ -247,7 +270,12 @@ impl<'a> Scheduler<'a> { eprintln!("e core scheduler set cpu to -1"); } - let running_on_e_core = self.e_cores.contains(&dispatched_task.cpu); + let running_on_e_core = self + .e_cores + .as_ref() + .map(|e_cores| e_cores.contains(&dispatched_task.cpu)) + .unwrap_or(false); + if let Some(entry) = self.managed_tasks.get_mut(&task.pid) { entry.set_cpu(dispatched_task.cpu); entry.set_running_on_e_core(running_on_e_core); @@ -299,15 +327,9 @@ impl<'a> Scheduler<'a> { } pub fn run(&mut self) -> Result<UserExitInfo> { + self.try_set_up_garbage_cpu()?; let mut i = 0; - // let mut created_garbage_core = false; while !self.bpf.exited() { - // This is how a garbage core could be created - // The core should also be excluded from the e core scheduler - //if !created_garbage_core { - // created_garbage_core = - // self.try_set_up_garbage_cpu(self.e_cores.clone().max().unwrap_or(0) as u32)?; - //} i += 1; self.dispatch_tasks(); if i % 100 == 0 { |