1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
//! Utility data structures and locks.

#![allow(unused)]

use core::cell::UnsafeCell;
use core::fmt;
use core::mem::size_of_val;
use core::mem::{align_of, size_of};
use core::ops::{Deref, DerefMut};

mod spin;
pub use spin::{Spin, SpinGuard};
mod ticket;
pub use ticket::{Ticket, TicketGuard};
mod once;
pub use once::{Lazy, Once, OnceError};

use crate::arch::{cpu, int};
use crate::interrupts::guard::GUARD;
use crate::MAX_CPUS;

/// Abstraction for per cpu data.
///
/// As the data can only be edited by the corresponding cpu,
/// mutable access is safe even without locking.
/// Each slot is cache aligned (see [`Align`]) to prevent false-sharing effects.
///
/// # Safety
///
/// Be aware of context switches!
/// If the thread changes while the per-cpu data is updated, things might break.
/// Disable interrupts or enter the E1/2 to prevent this!
#[derive(Debug)]
pub struct PerCPU<T>(UnsafeCell<[Align<T>; MAX_CPUS]>);

// SAFETY: each cpu only accesses its own (cache-line-isolated) slot, so the
// slots never alias across cpus; see the struct-level safety note about
// preemption, which callers must uphold.
unsafe impl<T> Sync for PerCPU<T> {}

impl<T> PerCPU<T> {
    /// Create a new per-cpu container from one pre-aligned value per cpu.
    pub const fn new(v: [Align<T>; MAX_CPUS]) -> Self {
        Self(UnsafeCell::new(v))
    }
    /// Get the cpu-local value.
    ///
    /// Note: Preemption has to be disabled!
    /// The debug assertion below checks this (interrupts off, or the global
    /// interrupt guard held) and reports the caller's location on failure.
    #[allow(clippy::mut_from_ref)]
    #[track_caller]
    pub fn get(&self) -> &mut T {
        debug_assert!(
            !int::enabled() || GUARD.active(),
            "Preemption not disabled {}",
            core::panic::Location::caller()
        );
        // SAFETY: preemption is disabled (asserted above in debug builds), so
        // cpu::id() is stable and no other context touches this cpu's slot.
        unsafe { self.get_raw(cpu::id()) }
    }
    /// Get a cpu-local value.
    ///
    /// Panics if `id >= MAX_CPUS` (slice bounds check).
    ///
    /// # Safety
    ///
    /// The caller must ensure that no other reference (shared or mutable) to
    /// slot `id` exists for the returned borrow's lifetime — in practice:
    /// only call this with the current cpu's id while preemption is disabled.
    #[allow(clippy::mut_from_ref)]
    pub unsafe fn get_raw(&self, id: usize) -> &mut T {
        // Deref-coerces &mut Align<T> to &mut T on return.
        let inner = &mut *self.0.get();
        &mut inner[id]
    }
}

/// Provides cache alignment for T
///
/// The 64-byte alignment matches the cache-line size assumed by this kernel,
/// so adjacent `Align` values never share a cache line (avoids false sharing).
/// `T` defaults to `()` so `Align` can also be used as a pure alignment marker.
#[derive(Clone, Default, Hash, PartialEq, Eq)]
#[repr(align(64))]
pub struct Align<T = ()>(pub T);

// Compile-time checks that the repr attribute actually yields 64-byte
// alignment, both for the marker form and for a payload-carrying form.
const _: () = assert!(align_of::<Align>() == 64);
const _: () = assert!(align_of::<Align<usize>>() == 64);

impl<T> Deref for Align<T> {
    type Target = T;
    fn deref(&self) -> &T {
        &self.0
    }
}
impl<T> DerefMut for Align<T> {
    fn deref_mut(&mut self) -> &mut T {
        &mut self.0
    }
}
impl<T: fmt::Debug> fmt::Debug for Align<T> {
    /// Forward formatting to the inner value, hiding the wrapper.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0.fmt(f)
    }
}
impl<T> From<T> for Align<T> {
    fn from(t: T) -> Self {
        Align(t)
    }
}

/// Returns the smallest multiple of `align` that is greater than or equal to `v`.
///
/// Panics if `align` is zero.
pub fn align_up(v: usize, align: usize) -> usize {
    match v % align {
        0 => v,
        rest => v + (align - rest),
    }
}
/// Returns the largest multiple of `align` that is smaller than or equal to `v`.
///
/// Panics if `align` is zero.
pub fn align_down(v: usize, align: usize) -> usize {
    v - v % align
}
/// Cast the slice to another data type
///
/// The output length is the input's total byte size divided by `size_of::<O>()`;
/// any trailing bytes that do not fill a whole `O` are silently dropped.
/// Panics if the input pointer is not aligned for `O`.
///
/// # Safety
///
/// The caller must ensure that reinterpreting the bytes of `[I]` as values of
/// `O` is valid — every possible bit pattern in the covered bytes must be a
/// valid `O` (no invalid-value UB, e.g. `bool` or enums as `O` are unsound
/// for arbitrary input).
/// NOTE(review): divides by `size_of::<O>()` — a zero-sized `O` panics here;
/// confirm callers never instantiate with ZSTs.
pub unsafe fn slice_cast<I: Sized, O: Sized>(input: &[I]) -> &[O] {
    // Alignment for O cannot be proven statically, so check at runtime.
    assert!(input.as_ptr().cast::<O>().is_aligned());
    let len = size_of_val(input) / size_of::<O>();
    core::slice::from_raw_parts(input.as_ptr().cast(), len)
}
/// Cast the slice to another data type
///
/// Mutable variant of [`slice_cast`]: the output length is the input's total
/// byte size divided by `size_of::<O>()`; trailing bytes that do not fill a
/// whole `O` are silently dropped.
/// Panics if the input pointer is not aligned for `O`.
///
/// # Safety
///
/// The caller must ensure that reinterpreting the bytes of `[I]` as values of
/// `O` is valid in BOTH directions: every bit pattern in the covered bytes
/// must be a valid `O`, and every `O` written through the result must leave
/// the bytes valid as `I` values.
/// NOTE(review): divides by `size_of::<O>()` — a zero-sized `O` panics here;
/// confirm callers never instantiate with ZSTs.
pub unsafe fn slice_cast_mut<I: Sized, O: Sized>(input: &mut [I]) -> &mut [O] {
    // Alignment for O cannot be proven statically, so check at runtime.
    assert!(input.as_ptr().cast::<O>().is_aligned());
    let len = size_of_val(input) / size_of::<O>();
    core::slice::from_raw_parts_mut(input.as_mut_ptr().cast(), len)
}