rstubs/threading/scheduler.rs

//! The scheduler that coordinates the execution of threads

use core::cell::UnsafeCell;
use core::ptr;
use core::sync::atomic::AtomicUsize;
use core::sync::atomic::Ordering::Relaxed;

use arraydeque::ArrayDeque;

use super::Thread;
use crate::arch::int::lapic::{Destination, LAPIC};
use crate::arch::{cpu, int};
use crate::interrupts::guard::GUARD;
use crate::interrupts::Vector;
use crate::{INIT_STACKS, MAX_CPUS};

pub const APPS: usize = 10;

static mut THREAD_STACKS: [[u32; 0x4000]; APPS] = [[0; 0x4000]; APPS];

/// The scheduler plans the threads' execution order and, based on this,
/// selects the next thread to run.
///
/// The scheduler manages the ready queue, that is, the list of threads that
/// are ready to execute. Threads are arranged in FIFO order: when a thread is
/// set ready, it is appended to the end of the queue, while the next thread
/// to execute is taken from the front.
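///
/// # Example (sketch)
///
/// A minimal usage sketch, assuming threads are built with [`Thread::new`]
/// from an `extern "C" fn() -> !` action (as done for the idle thread in
/// [`Scheduler::schedule`]) and that the scheduler is reached through the
/// interrupt [`GUARD`], as in [`Scheduler::idle_action`]:
///
/// ```ignore
/// extern "C" fn worker() -> ! {
///     loop {
///         // ... the thread's work; yield or exit via the guarded scheduler ...
///     }
/// }
///
/// // Hypothetical boot path: register the thread, then start scheduling
/// // on this CPU (never returns).
/// let mut guarded = GUARD.enter();
/// let id = guarded.scheduler.add(Thread::new(worker));
/// guarded.scheduler.schedule();
/// ```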
pub struct Scheduler {
    /// Ready queue
    ready: ArrayDeque<usize, APPS>,
    /// Contains all threads
    ///
    /// Note: the idle threads have the id and index of their corresponding CPU
    threads: [Option<Thread>; MAX_CPUS + APPS],
    /// Next thread id
    next_id: AtomicUsize,
    /// Per-CPU data
    local: UnsafeCell<[Local; MAX_CPUS]>,
}

/// Private, CPU-local data
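///
/// Aligned to a cache line (64 bytes) to avoid false sharing between cores.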
#[repr(align(64))]
struct Local {
    /// Active thread (per core)
    active: Option<usize>,
    /// Thread to be removed after dispatch
    exited: Option<usize>,
}

impl Scheduler {
    /// Construct the scheduler.
    pub const fn new() -> Self {
        Self {
            ready: ArrayDeque::new(),
            threads: [const { None }; MAX_CPUS + APPS],
            next_id: AtomicUsize::new(MAX_CPUS),
            local: UnsafeCell::new(
                [const {
                    Local {
                        active: None,
                        exited: None,
                    }
                }; MAX_CPUS],
            ),
        }
    }

    /// Access local data for the current CPU.
    fn local(&self) -> &mut Local {
        assert!(
            !int::enabled() || GUARD.active(),
            "Possible Race Condition!"
        );
        &mut (unsafe { &mut *self.local.get() })[cpu::id()]
    }

    /// Add a new thread to the scheduler and mark it as ready.
    ///
    /// Returns the id assigned to the thread.
    pub fn add(&mut self, mut thread: Thread) -> usize {
        let id = self.next_id.fetch_add(1, Relaxed);
        thread.id = id;
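        // Application thread ids start at MAX_CPUS (smaller ids belong to the
        // idle threads), so subtract the offset to index the stack array.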
        thread.init(unsafe { &mut THREAD_STACKS[id - MAX_CPUS] });
        self.threads[id] = Some(thread);
        self.ready(id);
        id
    }

    /// Returns whether the given id belongs to a CPU's idle thread.
    pub const fn is_idle(thread: usize) -> bool {
        thread < MAX_CPUS
    }

    /// Include a thread in scheduling decisions.
    ///
    /// This method will register a thread for scheduling. It will be appended
    /// to the ready queue and dispatched once its time has come.
    ///
    /// Note: New threads have to be added first with [Scheduler::add].
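    ///
    /// A minimal sketch of a wake-up path (the id `blocked` is hypothetical
    /// and would come from, e.g., a synchronization object):
    ///
    /// ```ignore
    /// let mut guarded = GUARD.enter();
    /// guarded.scheduler.ready(blocked);
    /// ```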
    pub fn ready(&mut self, thread: usize) {
        assert!(self.threads[thread].is_some());
        if let Some(active) = self.active()
            && Self::is_idle(active)
        {
            // If the CPU is idle, we need to wake other CPUs up
            LAPIC.send(Destination::Others, Vector::WakeUp);
        }
        self.ready.push_back(thread).unwrap();
    }

    /// Returns the id of the thread currently running on this CPU, if any.
    pub fn active(&self) -> Option<usize> {
        self.local().active
    }

    /// Returns the CPU on which the thread is currently running, if any.
    pub fn is_active(&self, thread: usize) -> Option<usize> {
        let locals = unsafe { &*self.local.get() };
        cpu::iter().find(|&cpu| locals[cpu].active == Some(thread))
    }

    /// Returns a reference to the thread with the given id.
    pub fn thread(&self, thread: usize) -> Option<&Thread> {
        self.threads[thread].as_ref()
    }

    /// Returns a mutable reference to the thread with the given id.
    pub fn thread_mut(&mut self, thread: usize) -> Option<&mut Thread> {
        self.threads[thread].as_mut()
    }

    /// Start scheduling on this CPU.
    /// This function does not return.
    pub fn schedule(&mut self) -> ! {
        // Initialize own idle thread
        let idle_id = cpu::id();
        let mut idle = Thread::new(Self::idle_action);
        idle.id = idle_id;
        idle.init(unsafe { &mut INIT_STACKS[idle_id] });
        self.threads[idle_id] = Some(idle);

        let next = self.next();
        self.dispatch(next);

        unreachable!();
    }

    /// Initiates a thread switch.
    /// This function then returns in the context of the next thread.
    ///
    /// If `ready` is true, the currently running thread is added back to the ready queue.
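    ///
    /// A minimal sketch of a cooperative yield (hypothetical call site,
    /// entered through the interrupt [`GUARD`]):
    ///
    /// ```ignore
    /// let mut guarded = GUARD.enter();
    /// // Re-queue the current thread and switch to the next ready one.
    /// guarded.scheduler.resume(true);
    /// ```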
    pub fn resume(&mut self, ready: bool) {
        if let Some(active) = self.active().and_then(|t| self.thread(t)) {
            if active.exited {
                if let Some(exited) = self.local().exited.replace(active.id) {
                    // If there is already a dead thread
                    assert_ne!(exited, active.id, "Cannot exit while active");
                    self.threads[exited] = None;
                }
            } else if !Self::is_idle(active.id) && ready {
                // Do not schedule idle threads!
                self.ready.push_back(active.id).unwrap();
            }
        }

        let next = self.next();
        self.dispatch(next);

        if let Some(thread) = self.local().exited.take() {
            self.threads[thread] = None;
        }
    }

    /// Terminates the currently running thread and directly continues with the next one.
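    ///
    /// A minimal sketch of a thread action that terminates itself
    /// (hypothetical; assumes the scheduler is reached through [`GUARD`]):
    ///
    /// ```ignore
    /// extern "C" fn short_lived() -> ! {
    ///     // ... the thread's work ...
    ///     let mut guarded = GUARD.enter();
    ///     guarded.scheduler.exit();
    /// }
    /// ```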
    pub fn exit(&mut self) -> ! {
        self.thread_mut(self.active().unwrap()).unwrap().exited = true;
        self.resume(false);

        unreachable!();
    }

    /// Terminates `thread`, which might be running on another CPU.
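    ///
    /// A minimal sketch of a hypothetical call site (the id `victim` is made
    /// up for illustration):
    ///
    /// ```ignore
    /// let mut guarded = GUARD.enter();
    /// // Returns false for idle threads and ids that are no longer present.
    /// let killed = guarded.scheduler.kill(victim);
    /// ```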
    pub fn kill(&mut self, thread: usize) -> bool {
        if Self::is_idle(thread) || self.threads[thread].is_none() {
            return false;
        }
        debug!("kill {thread}");

        if self.local().active == Some(thread) {
            self.exit();
        } else {
            if let Some(thread) = &mut self.threads[thread] {
                // will be removed from ready list automatically
                thread.exited = true;
            } else {
                return true;
            }
            // interrupt the running thread
            if let Some(cpu) = self.is_active(thread) {
                LAPIC.send(Destination::Physical(cpu as _), Vector::Assassin);
            }
        }
        true
    }

    /// Updates the active-thread ("life") pointer to `next` and issues a
    /// thread switch from the previously active thread to the new one.
    fn dispatch(&mut self, next: usize) {
        let previous = if let Some(previous) = self.local().active.replace(next) {
            // Usually we cannot hold two mutable references, but it is fine here
            // -> Lifetime hack: cast it into a pointer and dereference it again
            Some(unsafe { &mut *ptr::from_mut(self.thread_mut(previous).unwrap()) })
        } else {
            None
        };

        let next = self.thread(next).unwrap();

        if let Some(last) = previous {
            last.resume(next);
        } else {
            next.go();
        }
    }

    /// Helper to retrieve the id of the next thread to run.
    fn next(&mut self) -> usize {
        while let Some(thread_id) = self.ready.pop_front() {
            if let Some(thread) = &mut self.threads[thread_id] {
                if thread.exited {
                    // remove exited threads
                    self.threads[thread_id] = None;
                } else {
                    return thread.id;
                }
            }
        }
        // idle thread
        cpu::id()
    }

    /// Action of the idle thread.
    pub extern "C" fn idle_action() -> ! {
        loop {
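            // Keep interrupts off while inspecting the ready queue, so that a
            // wake-up cannot slip in between the check and the halt below.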
            int::enable(false);
            if unsafe { GUARD.read().scheduler.ready.is_empty() } {
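                // Only the boot CPU (0) keeps its LAPIC timer ticking
                // unconditionally; other CPUs switch it off while idle, as
                // long as the bell ringer has no pending entries.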
                let no_timer = cpu::id() != 0 && unsafe { GUARD.read().bell_ringer.is_empty() };
                if no_timer {
                    LAPIC.timer_enable(false);
                }

                // enable int and wait
                int::idle();

                if no_timer {
                    LAPIC.timer_enable(true);
                }
            } else {
                int::enable(true);
                let mut guarded = GUARD.enter();
                guarded.scheduler.resume(false);
            }
        }
    }
}