//! The scheduler that coordinates the execution of threads

use core::sync::atomic::AtomicUsize;
use core::sync::atomic::Ordering::Relaxed;

use arraydeque::ArrayDeque;

use crate::arch::int::lapic::{IPIDestination, LAPIC};
use crate::arch::{cpu, int};
use crate::interrupts::guard::GUARD;
use crate::interrupts::Vector;
use crate::util::{Align, PerCPU};
use crate::{INIT_STACKS, MAX_CPUS};

use super::Thread;

pub const APPS: usize = 10;

static mut THREAD_STACKS: [[u32; 0x4000]; APPS] = [[0; 0x4000]; APPS];

/// The scheduler decides the execution order of the threads and, based on
/// that, selects the next thread to run.
///
/// It manages the ready queue, i.e. the list of threads that are ready to
/// execute. Threads are kept in FIFO order: when a thread is set ready, it is
/// appended to the back of the queue, while the next thread to execute is
/// taken from the front.
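///
/// A minimal usage sketch (hedged: `my_action` is a hypothetical `extern "C"`
/// entry function, and `GUARD` is the guard object used elsewhere in this
/// module):
///
/// ```ignore
/// let mut guarded = GUARD.enter();
/// // Register a new thread; it is appended to the back of the ready queue.
/// let id = guarded.scheduler.add(Thread::new(my_action));
/// // Later (e.g. on a timer tick): requeue the running thread and switch to
/// // the one at the front of the queue.
/// guarded.scheduler.resume(true);
/// ```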
pub struct Scheduler {
    /// Ready queue
    ready: ArrayDeque<usize, APPS>,
    /// Contains all threads
    ///
    /// Note: the idle threads have the id and index of their corresponding CPU
    threads: [Option<Thread>; MAX_CPUS + APPS],
    /// Next thread id
    next_id: AtomicUsize,
    /// Per-CPU data
    local: PerCPU<Local>,
}

/// Private, CPU-local data
struct Local {
    /// Active thread (per core)
    active: Option<usize>,
    /// Thread to be removed after dispatch
    exited: Option<usize>,
}

impl Scheduler {
    /// Construct the scheduler.
    pub const fn new() -> Self {
        Self {
            ready: ArrayDeque::new(),
            threads: [const { None }; MAX_CPUS + APPS],
            next_id: AtomicUsize::new(MAX_CPUS),
            local: PerCPU::new(
                [const {
                    Align(Local {
                        active: None,
                        exited: None,
                    })
                }; MAX_CPUS],
            ),
        }
    }

    /// Add a new thread to the scheduler and mark it ready.
    ///
    /// Returns the ID assigned to the new thread.
    pub fn add(&mut self, mut thread: Thread) -> usize {
        let id = self.next_id.fetch_add(1, Relaxed);
        thread.id = id;
        thread.init(unsafe { &mut THREAD_STACKS[id - MAX_CPUS] });

        self.threads[id] = Some(thread);
        self.ready(id);
        id
    }

    fn is_idle(thread: usize) -> bool {
        thread < MAX_CPUS
    }

    /// Include a thread in scheduling decisions.
    ///
    /// This method registers a thread for scheduling: it is appended to the
    /// ready queue and dispatched once its turn comes.
    ///
    /// Note: New threads have to be added first with [Scheduler::add].
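    ///
    /// Sketch (assuming `id` was obtained from [Scheduler::add] earlier and
    /// the thread has since blocked itself):
    ///
    /// ```ignore
    /// guarded.scheduler.ready(id); // append it to the back of the FIFO queue
    /// ```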
    pub fn ready(&mut self, thread: usize) {
        assert!(thread < self.threads.len() && self.threads[thread].is_some());
        self.ready.push_back(thread).unwrap();
    }

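    /// Returns the ID of the thread currently active on this CPU (if any).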
    pub fn active(&self) -> Option<usize> {
        self.local.get().active
    }

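    /// Returns a reference to the thread with the given ID, if it exists.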
    pub fn thread(&self, thread: usize) -> Option<&Thread> {
        self.threads[thread].as_ref()
    }
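
    /// Returns a mutable reference to the thread with the given ID, if it exists.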
    pub fn thread_mut(&mut self, thread: usize) -> Option<&mut Thread> {
        self.threads[thread].as_mut()
    }

    /// Start scheduling on this CPU.
    ///
    /// This function does not return.
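    ///
    /// Sketch of the intended call site (an assumption: every CPU calls this
    /// exactly once at the end of its boot sequence, from a guarded context):
    ///
    /// ```ignore
    /// let mut guarded = GUARD.enter();
    /// guarded.scheduler.schedule(); // never returns
    /// ```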
    pub fn schedule(&mut self) -> ! {
        // Initialize own idle thread
        let idle_id = cpu::id();
        let mut idle = Thread::new(Self::idle_action);
        idle.id = idle_id;
        idle.init(unsafe { &mut INIT_STACKS[idle_id] });

        self.threads[idle_id] = Some(idle);

        let next = self.next();
        self.dispatch(next);

        unreachable!();
    }

    /// Initiates a thread switch.
    ///
    /// This function returns later, in the context of the next thread.
    ///
    /// If `ready` is true, the currently running thread is appended back to
    /// the ready queue; otherwise it is not requeued.
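    ///
    /// Sketch of the two typical call variants (assuming a guarded context,
    /// as in [Scheduler::idle_action]):
    ///
    /// ```ignore
    /// guarded.scheduler.resume(true);  // yield: requeue the current thread
    /// guarded.scheduler.resume(false); // block: the current thread is not requeued
    /// ```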
    pub fn resume(&mut self, ready: bool) {
        if let Some(active) = self.active().and_then(|t| self.thread(t)) {
            if active.exited {
                if let Some(exited) = self.local.get().exited.replace(active.id) {
                    // If there is already a dead thread
                    assert_ne!(exited, active.id, "Cannot exit while active");
                    self.threads[exited] = None;
                }
            } else if !Self::is_idle(active.id) && ready {
                self.ready.push_back(active.id).unwrap();
            }
        }

        let next = self.next();
        self.dispatch(next);

        if let Some(thread) = self.local.get().exited.take() {
            self.threads[thread] = None;
        }
    }

    /// Terminates the currently running thread and directly continues with the next one.
    pub fn exit(&mut self) -> ! {
        self.thread_mut(self.active().unwrap()).unwrap().exited = true;
        self.resume(false);

        unreachable!();
    }

    /// Terminates `thread`, which might be currently running or waiting.
    ///
    /// Returns `false` if `thread` is an idle thread or does not exist.
    pub fn kill(&mut self, thread: usize) -> bool {
        if Self::is_idle(thread) || self.threads[thread].is_none() {
            return false;
        }
        println!(dbg: "kill {thread}");

        if self.local.get().active == Some(thread) {
            self.exit();
        } else {
            if let Some(thread) = &mut self.threads[thread] {
                // will be removed from ready list automatically
                thread.exited = true;
            } else {
                return true;
            }

            // find the CPU that is currently running the thread and interrupt it
            for cpu in cpu::iter() {
                if unsafe { self.local.get_raw(cpu).active == Some(thread) } {
                    LAPIC.ipi_send(IPIDestination::Physical(cpu as _), Vector::Assassin);
                    break;
                }
            }
        }
        true
    }

    /// Updates the life pointer (the per-CPU active thread) to `next` and
    /// issues a thread change from the previous thread to the new one.
    fn dispatch(&mut self, next: usize) {
        let previous = if let Some(previous) = self.local.get().active.replace(next) {
            // Usually we cannot have two mutable references, but I'm sure it's ok here
            // -> Lifetime hack: cast it into a pointer and dereference it again
            Some(unsafe { &mut *(self.thread_mut(previous).unwrap() as *mut Thread) })
        } else {
            None
        };

        let next = self.thread(next).unwrap();

        if let Some(last) = previous {
            last.resume(next);
        } else {
            next.go();
        }
    }

    /// Helper to retrieve the next thread to run.
    ///
    /// Exited threads are removed along the way; if the ready queue is empty,
    /// the CPU's idle thread is chosen.
    fn next(&mut self) -> usize {
        while let Some(thread_id) = self.ready.pop_front() {
            if let Some(thread) = &self.threads[thread_id] {
                if thread.exited {
                    // remove exited threads
                    self.threads[thread_id] = None;
                } else {
                    return thread.id;
                }
            }
        }
        // idle thread
        cpu::id()
    }

    /// Action of the idle thread: wait for interrupts until new work is ready.
    pub extern "C" fn idle_action() {
        loop {
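            // Check the ready queue with interrupts disabled to avoid racing
            // with wake-ups before the CPU is halted.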
            int::enable(false);
            if unsafe { GUARD.read().scheduler.ready.is_empty() } {
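                // On CPUs other than 0, the local timer can be stopped while
                // idling if no alarms (bell_ringer entries) are pending.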
                let no_timer = cpu::id() != 0 && unsafe { GUARD.read().bell_ringer.is_empty() };
                if no_timer {
                    LAPIC.timer_enable(false);
                }

                // enable int and wait
                int::idle();

                if no_timer {
                    LAPIC.timer_enable(true);
                }
            } else {
                int::enable(true);
                let mut guarded = GUARD.enter();
                guarded.scheduler.resume(false);
            }
        }
    }
}