rstubs/interrupts/guard.rs

//! Implementation of the prologue/epilogue model.

use core::cell::UnsafeCell;
use core::ops::{Deref, DerefMut};
use core::sync::atomic::AtomicBool;
use core::sync::atomic::Ordering::Relaxed;

use arraydeque::ArrayDeque;

use super::epilogue::Epilogue;
use crate::arch::{cpu, int};
use crate::device::KeyBuffer;
use crate::threading::{BellRinger, Scheduler};
use crate::util::{Ticket, TicketGuard};
use crate::MAX_CPUS;

/// Size of the epilogue queue
const LEN: usize = 8;

/// Synchronizes the kernel with interrupts using the Prologue/Epilogue Model
pub static GUARD: Guard = Guard::new();

/// Synchronizes the kernel with interrupts using the Prologue/Epilogue Model
///
/// The [Guard] is used to synchronize between "normal" core activities (currently
/// just the text output, later system calls) and interrupt handling routines.
/// For this purpose, [Guard] has to contain one or more "queues"
/// into which [Epilogue] objects can be enqueued. This is necessary if the critical
/// section is occupied at the time an interrupt occurs and the [Epilogue::run]
/// method therefore cannot be executed immediately. The queued epilogues are processed
/// when leaving the critical section.
///
/// The [Guard] protects and synchronizes various global kernel objects.
/// These objects are only accessible when the guard is locked, by the control
/// flow holding the lock. This prevents concurrent access and race conditions.
///
/// ## Hints
///
/// The epilogue queue is a central data structure whose consistency
/// must be ensured. The implementation provided by [ArrayDeque] is not
/// safe against concurrent accesses, i.e. two control flows must never
/// operate on it at the same time. You need to disable interrupts during
/// operations on the queue.
///
/// For SMP, you need a separate epilogue queue for each core,
/// in which each processor serializes *its* epilogues. However, epilogues
/// on different cores could then be executed in parallel, since the
/// critical section is managed separately on a per-core basis. This must be
/// prevented by using a global [Ticket] lock to avoid concurrent
/// execution of epilogues -- there must never be more than one epilogue
/// active on the whole system at the same time!
///
/// > *Please note:* This [giant lock](https://en.wikipedia.org/wiki/Giant_lock)
/// > (synchronizing all cores) should not be confused with the (core-specific)
/// > flag variable that marks only the entry to the epilogue level on the
/// > corresponding core!
///
/// Interrupts should be disabled for as short a time as possible. For this
/// reason, the prologue/epilogue model allows epilogues to be interrupted
/// by prologues. This means that interrupts should be
/// [enabled][crate::arch::int::enable] again before the epilogue is
/// executed (this includes notifying the APIC about the "End-Of-Interrupt").
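///
/// ## Example
///
/// A minimal usage sketch of the global [GUARD] from the "normal" level
/// (the body of the scope is only illustrative):
///
/// ```ignore
/// {
///     // Enter the epilogue layer; panics on double entry on the same core.
///     let mut guarded = GUARD.enter();
///     // The guard dereferences (mutably) to the protected Guarded objects,
///     // e.g. `guarded.scheduler` or `guarded.keyboard`.
/// } // dropping the guard runs `GUARD.leave()`, processing queued epilogues
/// ```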
pub struct Guard {
    guarded: Ticket<Guarded>,
    /// Indicates whether the epilogue layer is active on the corresponding core
    active: [AtomicBool; MAX_CPUS],
    /// CPU-local queues for epilogues
    epilogues: UnsafeCell<[ArrayDeque<Epilogue, LEN>; MAX_CPUS]>,
}

unsafe impl Sync for Guard {}

/// Protected and synchronized kernel objects
pub struct Guarded {
    pub scheduler: Scheduler,
    pub bell_ringer: BellRinger,
    pub keyboard: KeyBuffer,
}

impl Guard {
    pub const fn new() -> Self {
        Self {
            guarded: Ticket::new(Guarded {
                scheduler: Scheduler::new(),
                bell_ringer: BellRinger::new(),
                keyboard: KeyBuffer::new(),
            }),
            active: [const { AtomicBool::new(false) }; MAX_CPUS],
            epilogues: UnsafeCell::new([const { ArrayDeque::new() }; MAX_CPUS]),
        }
    }

    /// Access the epilogue queue of the current core.
    ///
    /// Interrupts have to be disabled while the queue is accessed, otherwise
    /// a prologue could corrupt it concurrently.
    fn epilogues(&self) -> &mut ArrayDeque<Epilogue, LEN> {
        assert!(!int::enabled(), "Possible Race Condition!");
        &mut (unsafe { &mut *self.epilogues.get() })[cpu::id()]
    }

    /// Temporary read-only access to the protected objects, bypassing the lock.
    ///
    /// # Safety
    ///
    /// Beware of race conditions: the data may be modified concurrently by
    /// whoever currently holds the lock.
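    ///
    /// A minimal sketch of an unlocked, read-only peek (whether such a racy
    /// read is acceptable depends entirely on the call site):
    ///
    /// ```ignore
    /// // No locking, no epilogue-layer entry: the data may change concurrently.
    /// let guarded = unsafe { GUARD.read() };
    /// let _scheduler = &guarded.scheduler;
    /// ```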
    pub unsafe fn read(&self) -> &Guarded {
        self.guarded.raw()
    }

    /// Enter the epilogue layer or wait synchronously if it is already occupied.
    ///
    /// A guard object is returned that unlocks the epilogue layer when it goes out of scope.
    #[track_caller]
    pub fn enter(&self) -> GuardedGuard<'_> {
        let caller = core::panic::Location::caller();

        if self.active[cpu::id()].swap(true, Relaxed) {
            panic!("Guard double entry {caller}");
        }

        GuardedGuard {
            inner: Some(self.guarded.lock()),
        }
    }

    /// Register the given epilogue, which is either executed directly, if possible,
    /// or enqueued for later execution.
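    ///
    /// A minimal sketch of the intended call site (only a sketch; `epilogue`
    /// stands for an already constructed [Epilogue], whose creation is not
    /// shown here):
    ///
    /// ```ignore
    /// // At the end of an interrupt handler: the prologue has run and the
    /// // LAPIC has already been notified, so the deferred work is relayed.
    /// GUARD.relay(epilogue);
    /// ```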
    pub fn relay(&self, mut epilogue: Epilogue) {
        // We have to stay on this CPU, so that a timer interrupt cannot schedule
        // us onto a different core after reading the CPU ID (but before setting
        // the core-local active flag, the "epi_flag")
        if self.active[cpu::id()]
            .compare_exchange(false, true, Relaxed, Relaxed)
            .is_ok()
        {
            // Enable interrupts (the interrupt handler should already have
            // acknowledged the IRQ at the LAPIC)
            int::enable(true);
            // Interrupts shall be enabled on acquiring the big kernel lock
            let mut g = self.guarded.lock();
            // Process epilogue
            epilogue.run(&mut g);

            // Keep the ticket locked: `leave` unlocks the guard itself after
            // processing any queued epilogues
            core::mem::forget(g);

            self.leave();
        } else if self.epilogues().push_back(epilogue).is_err() {
            // The epilogue queue is full -- we have lost an interrupt!
            debug!("!");
        }
    }

    /// Leave the epilogue layer.
    ///
    /// Any epilogues queued on this core are processed first; afterwards the
    /// global lock is released and the core-local active flag is cleared.
    pub fn leave(&self) {
        let status = int::enabled();

        // The epilogue queue must only be accessed with interrupts disabled
        int::enable(false);
        while let Some(mut epilogue) = self.epilogues().pop_front() {
            // Restore the previous interrupt state while the epilogue runs
            int::enable(status);

            // Here threads might switch!
            epilogue.run(unsafe { self.guarded.raw() });

            int::enable(false);
        }

        // Unlock the guard
        unsafe { self.guarded.unlock() };
        self.active[cpu::id()].store(false, Relaxed);

        int::enable(status);
    }

    /// Returns whether the epilogue layer (level 1/2) is active on this core
    pub fn active(&self) -> bool {
        self.active[cpu::id()].load(Relaxed)
    }
}

/// Provides access to protected objects and unlocks the guard
/// when it goes out of scope.
pub struct GuardedGuard<'a> {
    inner: Option<TicketGuard<'a, Guarded>>,
}

impl Deref for GuardedGuard<'_> {
    type Target = Guarded;

    fn deref(&self) -> &Self::Target {
        self.inner.as_ref().unwrap()
    }
}

impl DerefMut for GuardedGuard<'_> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.inner.as_mut().unwrap()
    }
}

impl Drop for GuardedGuard<'_> {
    fn drop(&mut self) {
        // The ticket is unlocked as part of `leave`, so the inner guard must
        // not unlock it again on drop
        core::mem::forget(self.inner.take());
        GUARD.leave();
    }
}