summaryrefslogtreecommitdiff
path: root/src/energy/trackers/perf.rs
blob: 96c73d96c1c459636a89a25951c197321393e0ef (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
use std::collections::HashMap;

use burn::tensor::Tensor;
use perf_event::{
    events::{Event, Hardware, Software},
    Builder, Counter, Group,
};

use crate::freq::FrequencyKHZ;
use std::sync::{Arc, RwLock};

use crate::energy::Estimator;
use crate::model::ArrayBackend;

/// Per-process energy estimator driven by Linux perf hardware counters.
///
/// For every traced PID it keeps a set of open perf counters and feeds the
/// time-normalised counter deltas, together with the CPU's current
/// frequency, into a learned model that predicts energy consumption.
pub struct PerfEstimator {
    /// Open perf counter state, keyed by PID.
    registry: HashMap<u64, Counters>,
    /// Model applied while a task runs on a performance core.
    model_p: crate::model::Net<ArrayBackend>,
    /// Model applied while a task runs on an efficiency core.
    model_e: crate::model::Net<ArrayBackend>,
    /// Backend device on which model input tensors are created.
    device: <ArrayBackend as burn::prelude::Backend>::Device,
    /// Per-CPU current frequencies in kHz; written elsewhere, read here
    /// under the lock when building the model's feature vector.
    shared_cpu_current_frequencies: Arc<RwLock<Vec<FrequencyKHZ>>>,
}

impl PerfEstimator {
    pub fn new(shared_cpu_current_frequencies: Arc<RwLock<Vec<FrequencyKHZ>>>) -> Self {
        // let model_p = crate::model::load_model("perf_pcore.pt");
        let model_p = crate::model::load_model("perf.pt");
        let model_e = crate::model::load_model("perf.pt");
        // let model_e = crate::model::load_model("perf_ecore.pt");
        Self {
            registry: Default::default(),
            model_p,
            model_e,
            device: Default::default(),
            shared_cpu_current_frequencies,
        }
    }
}

/// Perf counter state tracked for one traced process.
struct Counters {
    /// Counter group opened for the PID. Not currently used for grouped
    /// reads (the individual counters are built ungrouped), but it is
    /// reset alongside them after each consumption read.
    group: Group,
    /// One standalone counter per sampled hardware event.
    counters: Vec<Counter>,
    /// `time_running` snapshot (ns) from the previous read; deltas against
    /// it normalise counts to a fixed time window.
    old_time: u64,
    /// Energy estimate accumulated over all reads so far.
    old_total_energy: f64,
    /// CPU the task was last observed on (indexes the frequency table).
    cpu: i32,
    /// Whether the task currently runs on an efficiency core; selects
    /// which model is used.
    running_on_e_core: bool,
}

/// Hardware events sampled for tasks on performance cores.
static EVENT_TYPES_P: &[Event] = &[
    Event::Hardware(Hardware::BRANCH_INSTRUCTIONS),
    Event::Hardware(Hardware::BRANCH_MISSES),
    Event::Hardware(Hardware::CACHE_MISSES),
    Event::Hardware(Hardware::CACHE_REFERENCES),
    Event::Hardware(Hardware::CPU_CYCLES),
    Event::Hardware(Hardware::INSTRUCTIONS),
    Event::Hardware(Hardware::REF_CPU_CYCLES),
];
/// Hardware events sampled for tasks on efficiency cores.
/// Smaller set than the P-core list — presumably some events (e.g.
/// REF_CPU_CYCLES, BRANCH_INSTRUCTIONS) are unavailable or were dropped
/// from the E-core model's features; TODO confirm against training code.
static EVENT_TYPES_E: &[Event] = &[
    Event::Hardware(Hardware::BRANCH_MISSES),
    Event::Hardware(Hardware::CACHE_MISSES),
    Event::Hardware(Hardware::CACHE_REFERENCES),
    Event::Hardware(Hardware::CPU_CYCLES),
    Event::Hardware(Hardware::INSTRUCTIONS),
    // Event::Hardware(Hardware::REF_CPU_CYCLES),
];

impl Estimator for PerfEstimator {
    /// Starts tracing `pid`: opens one perf counter per hardware event,
    /// enables and resets them, and snapshots the initial running time.
    ///
    /// Returns `Err(())` if any perf syscall fails (e.g. insufficient
    /// permissions / restrictive `perf_event_paranoid`).
    fn start_trace(&mut self, pid: u64, cpu: i32, running_on_e_core: bool) -> Result<(), ()> {
        // A group is created (and kept alive in `Counters`), but the
        // individual counters are deliberately built ungrouped — see the
        // commented-out `.group()` call below.
        let group = match Group::new_with_pid_and_cpu(pid as i32, -1) {
            Ok(counters) => counters,
            Err(e) => {
                eprintln!(
                    "Failed to create performance counter group for PID {}: {}",
                    pid, e
                );
                return Err(());
            }
        };

        // NOTE(review): `|| true` forces the E-core event set for every task,
        // leaving EVENT_TYPES_P unused. Kept as-is on purpose — remove the
        // `|| true` once per-core-type event sets are re-enabled.
        let counters: Result<Vec<_>, _> = if running_on_e_core || true {
            EVENT_TYPES_E
        } else {
            EVENT_TYPES_P
        }
        .iter()
        .map(|kind| {
            Builder::new()
                // .group(&mut group)
                .kind(kind.clone())
                .observe_pid(pid as i32)
                .inherit_thread(true)
                .build()
        })
        .collect();

        let mut counters = match counters {
            Ok(counters) => counters,
            Err(e) => {
                // Fixed message: this branch reports counter creation, not
                // group creation, failing.
                eprintln!(
                    "Failed to create performance counters for PID {}: {}",
                    pid, e
                );
                return Err(());
            }
        };

        for counter in counters.iter_mut() {
            if let Err(e) = counter.enable() {
                eprintln!("Failed to enable performance counters: {}", e);
                return Err(());
            }
            if let Err(e) = counter.reset() {
                eprintln!("Failed to reset performance counters: {}", e);
                return Err(());
            }
        }

        // Snapshot the task's scheduled-on time so the first consumption
        // read can compute a meaningful delta. Both event lists are
        // non-empty, so indexing [0] cannot panic.
        let old_time = counters[0].read_count_and_time().unwrap().time_running;
        let counters = Counters {
            counters,
            group,
            old_time,
            old_total_energy: 0.,
            cpu,
            running_on_e_core,
        };
        self.registry.insert(pid, counters);
        Ok(())
    }

    /// Stops tracing `pid`; dropping the `Counters` closes the perf fds.
    fn stop_trace(&mut self, pid: u64) {
        self.registry.remove(&pid);
    }

    /// Records the task's current CPU and core type.
    ///
    /// Counter re-creation on a core-type migration is currently disabled
    /// (commented out below), but the change flag is now computed correctly.
    fn update_information(&mut self, pid: u64, cpu: i32, is_ecore: bool) {
        let mut core_type_changed = false;
        if let Some(info) = self.registry.get_mut(&pid) {
            // BUGFIX: compare against the *previous* core type before
            // overwriting it. The old code assigned first and compared
            // afterwards, so `core_type_changed` was always false.
            core_type_changed = is_ecore != info.running_on_e_core;
            info.cpu = cpu;
            info.running_on_e_core = is_ecore;
        } else {
            eprintln!("Tried to update an unknown task")
        }
        if core_type_changed {
            // Re-opening counters on migration is intentionally disabled.
            // self.stop_trace(pid);
            // self.start_trace(pid, cpu, is_ecore);
        }
    }

    /// Reads the counter deltas since the previous call, runs them through
    /// the core-type-specific model, and returns the estimated energy for
    /// the elapsed interval.
    ///
    /// Returns `None` if the PID is unknown or the task did not run since
    /// the last read.
    fn read_consumption(&mut self, pid: u64) -> Option<f64> {
        let Some(counters) = self.registry.get_mut(&pid) else {
            println!("did not find counters for {pid}");
            return None;
        };

        let time_running_ns = counters.counters[0]
            .read_count_and_time()
            .unwrap()
            .time_running;
        if time_running_ns - counters.old_time == 0 {
            // No scheduled-on time elapsed — nothing to attribute.
            return None;
        }
        // Scale raw counts to a fixed 10 ms window so the model input does
        // not depend on how long the task actually ran.
        let correction_factor = 10_000_000. / (time_running_ns - counters.old_time) as f64;
        counters.old_time = time_running_ns;

        // Feature vector: current core frequency (kHz / 1000 = MHz)
        // followed by the window-normalised event counts.
        let mut values = vec![
            (self.shared_cpu_current_frequencies.read().unwrap()[counters.cpu as usize] / 1000)
                as f64,
        ];
        for ty in counters.counters.iter_mut() {
            let count: u64 = ty.read().unwrap();
            values.push((count as f64) * correction_factor);
        }
        // Duplicate values[4] (CPU_CYCLES with the E-core event set) —
        // presumably padding the feature vector to the width the model was
        // trained with; TODO confirm against the training pipeline.
        values.push(values[4]);

        let result = if counters.running_on_e_core {
            &self.model_e
        } else {
            &self.model_p
        }
        .forward(Tensor::from_floats(&values.as_slice()[0..], &self.device));

        // Dividing by the correction factor converts the 10 ms-window
        // prediction back to energy for the real elapsed interval.
        let energy = result.into_scalar() as f64;
        counters.old_total_energy += energy / correction_factor;
        counters.group.reset().unwrap();
        for counter in counters.counters.iter_mut() {
            counter.reset().unwrap();
        }
        Some(energy / correction_factor)
    }
}