summary refs log tree commit diff
path: root/src
diff options
context:
space:
mode:
author	Dennis Kobert <dennis@kobert.dev>	2025-03-08 21:05:46 +0100
committer	Dennis Kobert <dennis@kobert.dev>	2025-03-08 21:05:46 +0100
commit	06dd6966ad27f0c52c56092dd2f45a06541e5c6c (patch)
tree	9364f3e275f0fc3d6dd5376d9ca4fe4b8f6c4732 /src
parent	6c9a9bca5fe6e6d6cac8f3d68c042daa3175e1be (diff)
Changes
Diffstat (limited to 'src')
-rw-r--r--	src/e_core_selector.rs	5
-rw-r--r--	src/main.rs	38
2 files changed, 16 insertions, 27 deletions
diff --git a/src/e_core_selector.rs b/src/e_core_selector.rs
index c4c2c4a..6952e80 100644
--- a/src/e_core_selector.rs
+++ b/src/e_core_selector.rs
@@ -8,7 +8,7 @@ pub trait ECoreSelector {
pub struct RoundRobinSelector {
offset: u32,
num_cores: u32,
- last_used: u32
+ last_used: u32,
}
impl RoundRobinSelector {
@@ -22,8 +22,9 @@ impl RoundRobinSelector {
}
impl ECoreSelector for RoundRobinSelector {
+ // TODO: use task cpu if suitable
fn next_core(&mut self) -> i32 {
self.last_used += 1;
(self.offset + (self.last_used % self.num_cores)) as i32
}
-}
\ No newline at end of file
+}
diff --git a/src/main.rs b/src/main.rs
index 8a94ff6..c477a0f 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -14,7 +14,7 @@ use clap::{Arg, ArgAction, Command};
use dashmap::DashMap;
use e_core_selector::{ECoreSelector, RoundRobinSelector};
use mock::perf::PerfEstimator;
-use scx_utils::UserExitInfo;
+use scx_utils::{Topology, UserExitInfo};
use libbpf_rs::OpenObject;
use mock::{KernelDriver, KernelModule, MockModule};
@@ -47,6 +47,7 @@ struct Scheduler<'a> {
own_pid: Pid,
p_cores: Range<i32>,
e_cores: Range<i32>,
+ topology: Topology,
e_core_selector: Box<dyn ECoreSelector>,
// reciever: Receiver<(Pid, Response)>,
sender: SyncSender<(Pid, Request)>,
@@ -75,6 +76,8 @@ impl<'a> Scheduler<'a> {
let map = Arc::new(map);
let thread_map = map.clone();
+ let topology = Topology::new().unwrap();
+
let read_cores = |core_type: &str| {
let e_cores_file = File::open(format!("/sys/devices/cpu_{core_type}/cpus"))?;
let e_cores_reader = BufReader::new(e_cores_file);
@@ -144,6 +147,7 @@ impl<'a> Scheduler<'a> {
own_pid: process::id() as i32,
p_cores,
e_cores,
+ topology,
e_core_selector: selector,
sender: request_send,
// reciever: response_recieve,
@@ -197,7 +201,7 @@ impl<'a> Scheduler<'a> {
self.task_queue.push_back(task);
}
dashmap::try_result::TryResult::Locked => {
- // println!("locked");
+ println!("locked");
self.task_queue.push_back(task);
}
}
@@ -239,7 +243,6 @@ impl<'a> Scheduler<'a> {
// Notify the BPF component of the number of pending tasks and immediately give a
// chance to run to the dispatched task.
- //TODO: should the pending tasks include the ones that don't have any budget left?
self.bpf.notify_complete(
self.task_queue.len() as u64 + self.no_budget_task_queue.len() as u64,
);
@@ -254,6 +257,9 @@ impl<'a> Scheduler<'a> {
dispatched_task.flags |= RL_CPU_ANY as u64;
}
+ if task.pid == self.own_pid {
+ dispatched_task.slice_ns = SLICE_US * 1000;
+ }
dispatched_task.slice_ns = SLICE_US;
self.bpf.dispatch_task(&dispatched_task).unwrap();
self.bpf.notify_complete(
@@ -263,10 +269,10 @@ impl<'a> Scheduler<'a> {
}
fn dispatch_tasks(&mut self) {
- loop {
- //TODO: we should probably not do this every time, but instead wait a predefined amount of time between invocations
- // self.reset_budgets_and_garbage_collect();
+ //TODO: we should probably not do this every time, but instead wait a predefined amount of time between invocations
+ self.reset_budgets_and_garbage_collect();
+ loop {
// Consume all tasks before dispatching any.
self.consume_all_tasks();
@@ -280,24 +286,6 @@ impl<'a> Scheduler<'a> {
if self.task_queue.is_empty() && self.no_budget_task_queue.is_empty() {
self.bpf.notify_complete(0);
- // self.sender.try_send((0, Request::Heartbeat)).unwrap();
- // for (pid, response) in self.reciever.try_iter() {
- // let Some(task_state) = self.managed_tasks.get_mut(&(pid)) else {
- // continue;
- // };
- // match response {
- // Response::Parent(parent) => {
- // task_state.parent = parent;
- // }
- // Response::Energy(energy) => {
- // let used_energy = energy - task_state.previous_energy_usage.min(energy);
- // task_state.previous_energy_usage = energy;
-
- // task_state.budget -= used_energy.min(task_state.budget);
- // }
- // }
- // }
-
break;
}
}
@@ -325,7 +313,7 @@ impl<'a> Scheduler<'a> {
value.budget = self.maximum_budget;
} else {
self.sender
- .try_send((*key as i32, Request::RemoveTask))
+ .try_send(({ *key }, Request::RemoveTask))
.unwrap();
}
was_scheduled