ladybird/Kernel/Arch/aarch64/Processor.cpp

Commit 892f81b01a by Timon Kruiper, 2022-12-29 19:32:20 -07:00:
Kernel/aarch64: Add initial implementation of Processor::init_context

    This does not handle everything correctly yet, such as setting the
    correct state for running userspace applications, however this should be
    enough to get kernel scheduling to work.

/*
 * Copyright (c) 2022, Timon Kruiper <timonkruiper@gmail.com>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Format.h>
#include <AK/Vector.h>
#include <Kernel/Arch/Processor.h>
#include <Kernel/Arch/TrapFrame.h>
#include <Kernel/Arch/aarch64/ASM_wrapper.h>
#include <Kernel/Arch/aarch64/CPU.h>
#include <Kernel/InterruptDisabler.h>
#include <Kernel/Random.h>
#include <Kernel/Scheduler.h>
#include <Kernel/Thread.h>
#include <Kernel/Time/TimeManagement.h>
extern "C" uintptr_t vector_table_el1;
namespace Kernel {
extern "C" void thread_context_first_enter(void);
extern "C" void exit_kernel_thread(void);
Processor* g_current_processor;
void Processor::initialize(u32 cpu)
{
    VERIFY(g_current_processor == nullptr);

    auto current_exception_level = static_cast<u64>(Aarch64::Asm::get_current_exception_level());
    dbgln("CPU{} started in: EL{}", cpu, current_exception_level);
    dbgln("Drop CPU{} to EL1", cpu);
    drop_to_exception_level_1();

    // Load EL1 vector table
    Aarch64::Asm::el1_vector_table_install(&vector_table_el1);

    g_current_processor = this;
}

[[noreturn]] void Processor::halt()
{
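    // Mask interrupts and then repeatedly "wait for interrupt"; with interrupts
    // masked this parks the core in a low-power state forever.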
    disable_interrupts();
    for (;;)
        asm volatile("wfi");
}

void Processor::flush_tlb_local(VirtualAddress, size_t)
{
    // FIXME: Figure out how to flush a single page
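    // The sequence below invalidates the entire TLB:
    //   dsb ishst      - make prior page table updates (stores) visible before the invalidate
    //   tlbi vmalle1is - invalidate all stage 1, EL1 TLB entries (inner shareable)
    //   dsb ish        - wait for the TLB invalidation to complete
    //   isb            - resynchronize instruction fetch with the updated translations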
asm volatile("dsb ishst");
asm volatile("tlbi vmalle1is");
asm volatile("dsb ish");
asm volatile("isb");
}
void Processor::flush_tlb(Memory::PageDirectory const*, VirtualAddress vaddr, size_t page_count)
{
    flush_tlb_local(vaddr, page_count);
}

u32 Processor::clear_critical()
{
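    // Drop this processor's critical section depth to zero, returning the previous
    // depth so the caller can restore it later. If we're not currently handling an
    // IRQ, this is also a point where a pending scheduler invocation may run.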
    InterruptDisabler disabler;
    auto prev_critical = in_critical();
    auto& proc = current();
    proc.m_in_critical = 0;
    if (proc.m_in_irq == 0)
        proc.check_invoke_scheduler();
    return prev_critical;
}

u32 Processor::smp_wake_n_idle_processors(u32 wake_count)
{
    (void)wake_count;
    // FIXME: Actually wake up other cores when SMP is supported for aarch64.
    return 0;
}

void Processor::initialize_context_switching(Thread& initial_thread)
{
    (void)initial_thread;
    TODO_AARCH64();
}

void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
{
    (void)from_thread;
    (void)to_thread;
    TODO_AARCH64();
}

void Processor::assume_context(Thread& thread, FlatPtr flags)
{
    (void)thread;
    (void)flags;
    TODO_AARCH64();
}

FlatPtr Processor::init_context(Thread& thread, bool leave_crit)
{
    VERIFY(g_scheduler_lock.is_locked());
    if (leave_crit) {
        // Leave the critical section we set up in Process::exec,
        // but because we still have the scheduler lock we should end up with 1
        VERIFY(in_critical() == 2);
        m_in_critical = 1; // leave it without triggering anything or restoring flags
    }

    u64 kernel_stack_top = thread.kernel_stack_top();

    // Add a random offset between 0-256 (16-byte aligned)
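    // (This makes the placement of the thread's initial kernel stack contents slightly less predictable.)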
    kernel_stack_top -= round_up_to_power_of_two(get_fast_random<u8>(), 16);

    u64 stack_top = kernel_stack_top;

    auto& thread_regs = thread.regs();

    // Push a RegisterState and TrapFrame onto the stack, which will be popped off the stack and restored into the
    // state of the processor by restore_previous_context.
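    // The resulting kernel stack layout (the stack grows downwards) is:
    //   thread.kernel_stack_top()
    //   [0-256 byte random gap]
    //   RegisterState (eretframe)
    //   TrapFrame  <- stack_top, which becomes the new context's initial stack pointer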
    stack_top -= sizeof(RegisterState);
    RegisterState& eretframe = *reinterpret_cast<RegisterState*>(stack_top);
    memcpy(eretframe.x, thread_regs.x, sizeof(thread_regs.x));

    // x30 is the Link Register for the aarch64 ABI, so this will return to exit_kernel_thread when the main thread function returns.
    eretframe.x[30] = FlatPtr(&exit_kernel_thread);
    eretframe.elr_el1 = thread_regs.elr_el1;
    eretframe.sp_el0 = kernel_stack_top;
    eretframe.tpidr_el0 = 0; // FIXME: Correctly initialize this when aarch64 has support for thread local storage.

    Aarch64::SPSR_EL1 saved_program_status_register_el1 = {};

    // Don't mask any interrupts, so all interrupts are enabled when transferring into the new context
    saved_program_status_register_el1.D = 0;
    saved_program_status_register_el1.A = 0;
    saved_program_status_register_el1.I = 0;
    saved_program_status_register_el1.F = 0;

    // Set exception origin mode to EL1t, so when the context is restored, we'll be executing in EL1 with SP_EL0
    // FIXME: This must be EL0t when aarch64 supports userspace applications.
    saved_program_status_register_el1.M = Aarch64::SPSR_EL1::Mode::EL1t;
    memcpy(&eretframe.spsr_el1, &saved_program_status_register_el1, sizeof(u64));

    // Push a TrapFrame onto the stack
    stack_top -= sizeof(TrapFrame);
    TrapFrame& trap = *reinterpret_cast<TrapFrame*>(stack_top);
    trap.regs = &eretframe;
    trap.next_trap = nullptr;

    if constexpr (CONTEXT_SWITCH_DEBUG) {
        dbgln("init_context {} ({}) set up to execute at ip={}, sp={}, stack_top={}",
            thread,
            VirtualAddress(&thread),
            VirtualAddress(thread_regs.elr_el1),
            VirtualAddress(thread_regs.sp_el0),
            VirtualAddress(stack_top));
    }

    // This makes sure the thread first executes thread_context_first_enter, which will actually call restore_previous_context,
    // which restores the context set up above.
    thread_regs.set_sp(stack_top);
    thread_regs.set_ip(FlatPtr(&thread_context_first_enter));

    return stack_top;
}

void Processor::enter_trap(TrapFrame& trap, bool raise_irq)
{
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(&Processor::current() == this);

    // FIXME: Figure out if we need prev_irq_level, see duplicated code in Kernel/Arch/x86/common/Processor.cpp
    if (raise_irq)
        m_in_irq++;
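
    // Traps can nest (for example an IRQ arriving while another trap is being handled),
    // so each thread keeps a singly linked list of active TrapFrames via next_trap;
    // push this trap onto the current thread's list.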
    auto* current_thread = Processor::current_thread();
    if (current_thread) {
        auto& current_trap = current_thread->current_trap();
        trap.next_trap = current_trap;
        current_trap = &trap;
        // FIXME: Determine PreviousMode from TrapFrame when userspace programs can run on aarch64
        auto new_previous_mode = Thread::PreviousMode::KernelMode;
        if (current_thread->set_previous_mode(new_previous_mode)) {
            current_thread->update_time_scheduled(TimeManagement::scheduler_current_time(), new_previous_mode == Thread::PreviousMode::KernelMode, false);
        }
    } else {
        trap.next_trap = nullptr;
    }
}

void Processor::exit_trap(TrapFrame& trap)
{
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(&Processor::current() == this);

    // Temporarily enter a critical section. This is to prevent critical
    // sections entered and left within e.g. smp_process_pending_messages
    // from triggering a context switch while we're executing this function.
    // See the comment at the end of the function for why we don't use
    // ScopedCritical here.
    m_in_critical = m_in_critical + 1;

    // FIXME: Figure out if we need prev_irq_level, see duplicated code in Kernel/Arch/x86/common/Processor.cpp
    m_in_irq = 0;

    auto* current_thread = Processor::current_thread();
    if (current_thread) {
        auto& current_trap = current_thread->current_trap();
        current_trap = trap.next_trap;

        Thread::PreviousMode new_previous_mode;
        if (current_trap) {
            VERIFY(current_trap->regs);
            // FIXME: Determine PreviousMode from TrapFrame when userspace programs can run on aarch64
            new_previous_mode = Thread::PreviousMode::KernelMode;
        } else {
            // If we don't have a higher level trap then we're back in user mode,
            // which means that the mode prior to being back in user mode was kernel mode.
            new_previous_mode = Thread::PreviousMode::KernelMode;
        }

        if (current_thread->set_previous_mode(new_previous_mode))
            current_thread->update_time_scheduled(TimeManagement::scheduler_current_time(), true, false);
    }

    VERIFY_INTERRUPTS_DISABLED();

    // Leave the critical section without actually enabling interrupts.
    // We don't want context switches to happen until we're explicitly
    // triggering a switch in check_invoke_scheduler.
    m_in_critical = m_in_critical - 1;
    if (!m_in_irq && !m_in_critical)
        check_invoke_scheduler();
}

ErrorOr<Vector<FlatPtr, 32>> Processor::capture_stack_trace(Thread& thread, size_t max_frames)
{
    (void)thread;
    (void)max_frames;
    TODO_AARCH64();
    return Vector<FlatPtr, 32> {};
}

void Processor::check_invoke_scheduler()
{
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(!m_in_irq);
    VERIFY(!m_in_critical);
    VERIFY(&Processor::current() == this);
    if (m_invoke_scheduler_async && m_scheduler_initialized) {
        m_invoke_scheduler_async = false;
        Scheduler::invoke_async();
    }
}

NAKED void thread_context_first_enter(void)
{
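    // As described in init_context() above, a newly scheduled thread starts executing here; this
    // function is eventually expected to call restore_previous_context for the TrapFrame that
    // init_context pushed onto the stack. Context switching is not implemented yet, so for now
    // it just waits for an interrupt.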
    asm(
        // FIXME: Implement this
        "wfi \n");
}

void exit_kernel_thread(void)
{
    Thread::current()->exit();
}

}