1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
//! Implementation of the prologue/epilogue model.

use core::cell::UnsafeCell;
use core::ops::{Deref, DerefMut};
use core::sync::atomic::{AtomicBool, Ordering::Relaxed};

use arraydeque::{ArrayDeque, Wrapping};

use super::epilog::Epilog;
use crate::arch::{cpu, int};
use crate::threading::{BellRinger, Scheduler, Semaphore};
use crate::util::{Align, Ticket, TicketGuard};
use crate::MAX_CPUS;

/// Capacity of each per-core epilog queue.
///
/// If a core's queue is full, [Guard::relay] drops the epilog
/// (an interrupt is lost and a debug marker is printed).
const LEN: usize = 8;

/// Global instance synchronizing the kernel with interrupts
/// using the Prolog/Epilog Model.
pub static GUARD: Guard = Guard::new();

/// Synchronizes the kernel with interrupts using the Prolog/Epilog Model
///
/// The [Guard] is used to synchronize between "normal" core activities (currently
/// just the text output, later system calls) and interrupt handling routines.
/// For this purpose, [Guard] has to contain one or more "queues",
/// in which [Epilog] objects can be added. This is necessary if the critical
/// section is occupied at the time when an interrupt occurs, and the [Epilog::run]
/// method cannot be executed immediately. The queued epilogs are processed
/// when leaving the critical section.
///
/// The [Guard] protects and synchronizes various global kernel objects.
/// These objects are only accessible when the guard is locked, by the control
/// flow holding the lock. This prevents concurrent access and race conditions.
///
/// ## Hints
///
/// The epilog queue is a central data structure, whose consistency
/// must be ensured. The implementation provided by the [ArrayDeque] is not
/// safe against concurrency, i.e. there must never be accesses by two cores
/// at the same time. You need to disable interrupts during operations on the queue.
///
/// For SMP, you need a separate epilog queue for each core,
/// in which each processor serializes *its* epilogs. However, epilogs
/// on different cores could then be executed in parallel, since the
/// critical section is managed separately on a per-core base. This must be
/// prevented by using a global [Ticket] lock to avoid concurrent
/// execution of epilogs -- there must never be more than one epilog
/// active on the whole system at the same time!
///
/// > *Please note:* This [giant lock](https://en.wikipedia.org/wiki/Giant_lock)
/// > (synchronizing all cores) should not be confused with the (core-specific)
/// > flag variable that marks only the entry to the epilog level on the
/// > corresponding core!
///
/// Interrupts should be disabled for as short as possible. Due to this
/// reason, the prologue/epilog model allows epilogs to be interrupted
/// by prologues. This means that interrupts should be
/// [int::enable] "enabled" again before the epilog is
/// executed (this includes notifying the APIC about the "End-Of-Interrupt")
pub struct Guard {
    /// Global ("giant") lock protecting the shared kernel objects; also
    /// ensures that at most one epilog runs on the whole system at a time.
    guarded: Ticket<Guarded>,
    /// Per-core entry flag and epilog queue, indexed by CPU id.
    /// Safety: The local data is only accessed by the corresponding core, preventing race condition.
    local: [UnsafeCell<Align<Local>>; MAX_CPUS],
}

// SAFETY: `guarded` is protected by its `Ticket` lock, and each `local` slot
// is only ever accessed by its own core (see `Guard::local`), so sharing
// `&Guard` across cores is sound.
unsafe impl Sync for Guard {}

/// Protected and synchronized kernel objects.
///
/// Only accessible while the [Guard] is locked (see [Guard::enter] /
/// [Guard::run]), which serializes all access across cores.
pub struct Guarded {
    /// The kernel's thread scheduler.
    pub scheduler: Scheduler,
    /// Wakes sleeping threads at their deadline (timer bookkeeping).
    pub bell_ringer: BellRinger,
    /// Semaphore signalling keyboard input
    /// (presumably counts buffered characters — TODO confirm against users).
    pub keyboard_sema: Semaphore,
    /// Buffer of decoded keyboard characters; wrapping, so the oldest
    /// character is overwritten when full.
    pub keyboard_buf: ArrayDeque<char, 16, Wrapping>,
}

/// Per-core state of the prologue/epilogue model.
struct Local {
    /// Set while this core is inside the epilog layer (layer 1/2).
    /// Not to be confused with the global `guarded` ticket lock.
    lock: AtomicBool,
    /// Epilogs queued by prologues while the layer was occupied;
    /// drained in FIFO order by [Guard::leave].
    epilogs: ArrayDeque<Epilog, LEN>,
}

impl Guard {
    /// Creates a new, unlocked guard with default kernel objects and
    /// empty per-core epilog queues.
    pub const fn new() -> Self {
        Self {
            guarded: Ticket::new(Guarded {
                scheduler: Scheduler::new(),
                bell_ringer: BellRinger::new(),
                keyboard_sema: Semaphore::new(0),
                keyboard_buf: ArrayDeque::new(),
            }),
            local: [const {
                UnsafeCell::new(Align(Local {
                    lock: AtomicBool::new(false),
                    epilogs: ArrayDeque::new(),
                }))
            }; MAX_CPUS],
        }
    }

    /// Returns the per-core data (entry flag and epilog queue) of the current core.
    #[allow(clippy::mut_from_ref)]
    fn local(&self) -> &mut Local {
        // SAFETY: The local data is only accessed by the corresponding core.
        unsafe { &mut *self.local[cpu::id()].get() }
    }

    /// Temporary read-only access.
    ///
    /// Safety: beware race conditions!
    pub unsafe fn read(&self) -> &Guarded {
        self.guarded.raw()
    }

    /// Temporary mutable access, which only succeeds if the current thread has already locked the guard.
    ///
    /// Safety: beware race conditions!
    #[allow(clippy::mut_from_ref)]
    pub unsafe fn get(&self) -> &mut Guarded {
        assert!(self.active());
        self.guarded.raw()
    }

    /// Enter the epilog layer or wait synchronously if it is already occupied.
    ///
    /// A guard object is returned that unlocks the epilog layer when it falls out of scope.
    ///
    /// # Panics
    ///
    /// Panics if this core has already entered the epilog layer (double entry).
    #[track_caller]
    pub fn enter(&self) -> GuardKey<'_> {
        let caller = core::panic::Location::caller();

        // Suppress interrupts so a prologue cannot observe (or race with)
        // the half-entered state while the flag is being set.
        int::suppress(|| {
            if self
                .local()
                .lock
                .compare_exchange(false, true, Relaxed, Relaxed)
                .is_err()
            {
                // TODO: Investigate rare panic!
                panic!("Guard double entry {caller}");
            }
        });

        GuardKey {
            inner: Some(self.guarded.lock()),
        }
    }

    /// Register the given epilog, which is either executed directly if possible
    /// or it is enqueued for later execution.
    pub fn relay(&self, mut epilog: Epilog) {
        // We have to secure locking the CPU to prevent a timer interrupt scheduling
        // us onto a different core after reading the CPUID (but before setting "epi_flag")
        if self
            .local()
            .lock
            .compare_exchange(false, true, Relaxed, Relaxed)
            .is_ok()
        {
            // Enable interrupts (interrupt_handler should already have sent the ACK IRQ via LAPIC)
            int::enable(true);
            // Interrupts shall be enabled on acquiring the big kernel lock
            let mut g = self.guarded.lock();
            // Process epilog
            epilog.run(&mut g);

            // Do not let the TicketGuard unlock here: `leave` below releases
            // both the ticket lock and the per-core flag (and drains any
            // epilogs queued in the meantime).
            core::mem::forget(g);

            self.leave();
        } else if self.local().epilogs.push_back(epilog).is_err() {
            // Queue overflow: we have lost an interrupt!
            print!(dbg: "!");
        }
    }

    /// Leave the epilog layer.
    ///
    /// Drains this core's epilog queue, then releases the global ticket lock
    /// and the core-local entry flag, restoring the interrupt state on exit.
    pub fn leave(&self) {
        let status = int::enabled();

        // The queue must not be modified concurrently by a prologue.
        int::enable(false);
        while let Some(mut epilog) = self.local().epilogs.pop_front() {
            // Epilogs run with the caller's interrupt state restored,
            // so they may be interrupted by further prologues.
            int::enable(status);

            // Here threads might switch!
            epilog.run(unsafe { self.guarded.raw() });

            int::enable(false);
        }

        // Unlock the guard
        unsafe { self.guarded.unlock() };
        self.local().lock.store(false, Relaxed);

        int::enable(status);
    }

    /// Returns whether the layer 1/2 is active on this core
    pub fn active(&self) -> bool {
        self.local().lock.load(Relaxed)
    }

    /// Enters layer 1/2, runs `f`, and leaves
    pub fn run<R>(&self, f: impl FnOnce(&mut Guarded) -> R) -> R {
        f(&mut self.enter())
    }
}

/// Provides access to protected objects and unlocks the guard
/// when it goes out of scope.
pub struct GuardKey<'a> {
    /// The underlying ticket-lock guard. Always `Some` during the key's
    /// lifetime; taken and forgotten in `Drop`, because `Guard::leave`
    /// performs the actual unlock.
    inner: Option<TicketGuard<'a, Guarded>>,
}

impl Deref for GuardKey<'_> {
    type Target = Guarded;

    /// Grants shared access to the guarded kernel objects.
    fn deref(&self) -> &Self::Target {
        // `inner` is always `Some` until `Drop` takes it.
        let ticket_guard = self.inner.as_ref().unwrap();
        ticket_guard
    }
}

impl DerefMut for GuardKey<'_> {
    /// Grants exclusive access to the guarded kernel objects.
    fn deref_mut(&mut self) -> &mut Self::Target {
        // `inner` is always `Some` until `Drop` takes it.
        let ticket_guard = self.inner.as_mut().unwrap();
        ticket_guard
    }
}

impl Drop for GuardKey<'_> {
    fn drop(&mut self) {
        // The ticket lock is released inside `GUARD.leave()` (together with
        // the per-core flag and any queued epilogs), so forget the inner
        // guard to prevent its own Drop from unlocking a second time.
        core::mem::forget(self.inner.take());
        GUARD.leave();
    }
}
}