rstubs/arch/cpu.rs

//! CPU specific functions

use core::arch::asm;
use core::mem::size_of;
use core::sync::atomic::{AtomicU32, Ordering};

use raw_cpuid::{CpuId, CpuIdReader};

use super::int::apic::LAPIC_IDS;
use super::int::lapic::LAPIC;
use crate::MAX_CPUS;

/// Bitmap of CPUs that have completed `init` (one bit per core).
static ONLINE_MASK: AtomicU32 = AtomicU32::new(0);
// The 32-bit mask must be able to hold one bit per possible CPU.
const _: () = assert!(MAX_CPUS <= size_of::<AtomicU32>() * 8);

/// Halt the CPU until the next interrupt arrives
pub fn halt() {
    unsafe { asm!("hlt", options(nomem, nostack)) };
}

/// Initialize this CPU, marking `core` as online
pub fn init(core: usize) {
    ONLINE_MASK.fetch_or(1 << core, Ordering::AcqRel);
}

/// Returns a contiguous CPU ID
pub fn id() -> usize {
    // The LAPIC provides the hardware (hierarchical) CPU ID.
    // These IDs are not guaranteed to be contiguous (e.g. on
    // multi-socket/NUMA systems), so we map the LAPIC ID to its
    // index in LAPIC_IDS to obtain a contiguous ID.
    if let Some(ids) = LAPIC_IDS.get() {
        let id = LAPIC.id();
        ids.iter().position(|v| *v == id).unwrap_or(0)
    } else {
        0
    }
}
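
// Illustration (not part of the module): assuming LAPIC_IDS resolved to
// `[0, 2, 4, 6]`, a core whose LAPIC reports ID 4 gets the contiguous ID 2:
//
//     let ids = [0u32, 2, 4, 6];
//     assert_eq!(ids.iter().position(|v| *v == 4), Some(2));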

/// Returns a bitmap of online CPUs
pub fn online() -> u32 {
    ONLINE_MASK.load(Ordering::Acquire)
}

/// Returns an iterator over the IDs of all online CPUs
pub fn iter() -> impl Iterator<Item = usize> {
    let mask = ONLINE_MASK.load(Ordering::Acquire);
    (0..MAX_CPUS).filter(move |i| mask & (1 << i) != 0)
}
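
// Usage sketch (hypothetical caller; `cpu` is the assumed path of this
// module): visit every core that has called `init`.
//
//     for core in cpu::iter() {
//         // per-core work, e.g. logging or sending an IPI to `core`
//     }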

/// Returns the CPUID information (via the `raw_cpuid` crate)
pub fn cpuid() -> CpuId<impl CpuIdReader> {
    /// Reader that issues the `cpuid` instruction directly on this core.
    #[derive(Clone, Copy)]
    struct CpuIdNative;
    impl CpuIdReader for CpuIdNative {
        fn cpuid2(&self, eax: u32, ecx: u32) -> raw_cpuid::CpuIdResult {
            let core::arch::x86::CpuidResult { eax, ebx, ecx, edx } =
                unsafe { core::arch::x86::__cpuid_count(eax, ecx) };
            raw_cpuid::CpuIdResult { eax, ebx, ecx, edx }
        }
    }
    CpuId::with_cpuid_reader(CpuIdNative)
}
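
// Usage sketch: querying vendor and feature leaves through `raw_cpuid`.
// `get_vendor_info`, `as_str`, `get_feature_info` and `has_apic` are
// `raw_cpuid` APIs; the bindings below are illustrative only.
//
//     if let Some(vendor) = cpuid().get_vendor_info() {
//         let _name: &str = vendor.as_str(); // e.g. "GenuineIntel"
//     }
//     let _has_apic = cpuid()
//         .get_feature_info()
//         .map_or(false, |f| f.has_apic());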