diff options
author | Lennard Kittner <lennardkittner@icloud.com> | 2025-03-28 14:53:57 +0100 |
---|---|---|
committer | Lennard Kittner <lennardkittner@icloud.com> | 2025-03-28 14:53:57 +0100 |
commit | 87833a1b202f111321c05422a320ae67ebfafc75 (patch) | |
tree | a974f4b2a34c576410e11d78767038f1b2598920 | |
parent | 51fc84e9a40580056e60df23f75abbdf1477bda8 (diff) |
Add try_set_up_garbage_cpu
Fix core numbering
-rw-r--r-- | src/scheduler.rs | 31 |
1 file changed, 29 insertions, 2 deletions
diff --git a/src/scheduler.rs b/src/scheduler.rs index a9cad61..42d480e 100644 --- a/src/scheduler.rs +++ b/src/scheduler.rs @@ -12,6 +12,7 @@ use std::collections::{HashMap, VecDeque}; use std::mem::MaybeUninit; use std::ops::{Range, RangeInclusive}; use std::process; +use std::sync::mpsc::TrySendError; use std::sync::{mpsc, Arc, RwLock}; use std::time::{Duration, Instant}; @@ -93,8 +94,8 @@ impl<'a> Scheduler<'a> { // We assume that the CPU IDs for each core type are assigned contiguously. e_core_ids.sort(); p_core_ids.sort(); - let e_cores = *e_core_ids.first().unwrap_or(&0)..*e_core_ids.last().unwrap_or(&0); - let p_cores = *p_core_ids.first().unwrap_or(&0)..*p_core_ids.last().unwrap_or(&0); + let e_cores = *e_core_ids.first().unwrap_or(&0)..(*e_core_ids.last().unwrap_or(&0) + 1); + let p_cores = *p_core_ids.first().unwrap_or(&0)..(*p_core_ids.last().unwrap_or(&0) + 1); let all_cores = 0..((e_cores.len() + p_cores.len()) as u32); let selector = Box::new(RoundRobinSelector::new(&e_cores)); @@ -139,6 +140,25 @@ impl<'a> Scheduler<'a> { }) } + fn try_set_up_garbage_cpu(&self, cpu: u32) -> Result<bool, TrySendError<FrequencyRequest>> { + if self.shared_cpu_frequency_ranges.read().unwrap().len() <= cpu as usize { + // We wait until shared_cpu_frequency_ranges has been initialized + return Ok(false); + } + let target = self.shared_cpu_frequency_ranges.read().unwrap()[cpu as usize] + .clone() + .min() + .unwrap(); + self.frequency_sender + .try_send(FrequencyRequest::SetTargetFrequencyForCore(cpu, target))?; + self.frequency_sender + .try_send(FrequencyRequest::SetFrequencyRangeForCore( + cpu, + target..=(target), + ))?; + Ok(true) + } + fn consume_all_tasks(&mut self) { while let Ok(Some(task)) = self.bpf.dequeue_task() { // The scheduler itself has to be scheduled regardless of its energy usage @@ -248,7 +268,14 @@ impl<'a> Scheduler<'a> { pub fn run(&mut self) -> Result<UserExitInfo> { let mut i = 0; + // let mut created_garbage_core = false; while 
!self.bpf.exited() { + // This is how a garbage core could be created + // The core should also be excluded from the e core scheduler + //if !created_garbage_core { + // created_garbage_core = + // self.try_set_up_garbage_cpu(self.e_cores.clone().max().unwrap_or(0) as u32)?; + //} i += 1; self.dispatch_tasks(); if i % 100 == 0 { |