From 2bba9411caf451103477d2bd8a333c35aa28b90e Mon Sep 17 00:00:00 2001 From: "Liav A." Date: Mon, 22 Apr 2024 13:30:09 +0300 Subject: [PATCH] Kernel: Use the AK SetOnce container class in various cases We have many places in the kernel code with boolean flags that are set only once and never reset, but are checked multiple times before and after being set. This is exactly the purpose of the SetOnce class. --- Kernel/Arch/Processor.cpp | 2 +- Kernel/Arch/Processor.h | 4 +++- Kernel/Arch/aarch64/Dummy.cpp | 7 +++--- Kernel/Arch/aarch64/Processor.cpp | 2 +- Kernel/Arch/init.cpp | 7 +++--- Kernel/Arch/riscv64/PCI/Initializer.cpp | 10 ++++---- Kernel/Arch/riscv64/Processor.cpp | 2 +- .../x86_64/Firmware/PCBIOS/SysFSDirectory.cpp | 4 ++-- .../x86_64/Firmware/PCBIOS/SysFSDirectory.h | 3 ++- Kernel/Arch/x86_64/Interrupts/APIC.cpp | 24 +++++++++---------- Kernel/Arch/x86_64/Interrupts/APIC.h | 3 ++- Kernel/Arch/x86_64/PCI/Initializer.cpp | 19 +++++++++------ Kernel/Arch/x86_64/Processor.cpp | 9 +++---- Kernel/Arch/x86_64/Processor.h | 3 ++- Kernel/Arch/x86_64/VGA/IOArbiter.cpp | 10 ++++---- Kernel/Arch/x86_64/VGA/IOArbiter.h | 3 ++- Kernel/Bus/PCI/Access.cpp | 4 ++-- Kernel/Bus/PCI/Initializer.h | 6 +++-- Kernel/Bus/VirtIO/Device.cpp | 12 +++++----- Kernel/Bus/VirtIO/Device.h | 7 +++--- Kernel/Bus/VirtIO/Transport/Entity.cpp | 2 +- Kernel/Bus/VirtIO/Transport/Entity.h | 3 ++- .../VirtIO/Transport/PCIe/TransportLink.cpp | 4 ++-- Kernel/Heap/kmalloc.cpp | 2 +- Kernel/KSyms.cpp | 8 +++---- Kernel/KSyms.h | 3 ++- Kernel/Library/KString.cpp | 7 +++--- Kernel/Locking/Mutex.cpp | 9 +++---- Kernel/Memory/Region.h | 8 ++++--- Kernel/Net/IPv4Socket.cpp | 8 +++---- Kernel/Net/IPv4Socket.h | 5 ++-- Kernel/Net/Intel/E1000ENetworkAdapter.cpp | 7 +++--- Kernel/Net/Intel/E1000NetworkAdapter.cpp | 9 ++++--- Kernel/Net/Intel/E1000NetworkAdapter.h | 3 ++- Kernel/Net/LocalSocket.cpp | 6 ++--- Kernel/Net/LocalSocket.h | 3 ++- Kernel/Net/TCPSocket.cpp | 2 +- Kernel/SanCov.cpp | 5 ++-- Kernel/Tasks/Process.cpp | 2 +- Kernel/Time/TimeManagement.cpp | 4 ++-- Kernel/Time/TimeManagement.h | 5 ++-- 41 files changed, 135 insertions(+), 111 deletions(-) diff --git a/Kernel/Arch/Processor.cpp b/Kernel/Arch/Processor.cpp index 122b582433..83958d72bc 100644 --- a/Kernel/Arch/Processor.cpp +++ b/Kernel/Arch/Processor.cpp @@ -23,7 +23,7 @@ void ProcessorBase::check_invoke_scheduler() VERIFY(!m_in_irq); VERIFY(!m_in_critical); VERIFY(&Processor::current() == this); - if (m_invoke_scheduler_async && m_scheduler_initialized) { + if (m_invoke_scheduler_async && m_scheduler_initialized.was_set()) { m_invoke_scheduler_async = false; Scheduler::invoke_async(); } diff --git a/Kernel/Arch/Processor.h b/Kernel/Arch/Processor.h index 0b6e4959b8..6d7fd37e0f 100644 --- a/Kernel/Arch/Processor.h +++ b/Kernel/Arch/Processor.h @@ -8,6 +8,7 @@ #pragma once #include +#include #include #include #include @@ -195,7 +196,8 @@ private: // they need to be FlatPtrs or everything becomes highly unsound and breaks. They are actually just booleans. 
FlatPtr m_in_scheduler; FlatPtr m_invoke_scheduler_async; - FlatPtr m_scheduler_initialized; + + SetOnce m_scheduler_initialized; DeferredCallPool m_deferred_call_pool {}; }; diff --git a/Kernel/Arch/aarch64/Dummy.cpp b/Kernel/Arch/aarch64/Dummy.cpp index a8d3aec853..9491d422d3 100644 --- a/Kernel/Arch/aarch64/Dummy.cpp +++ b/Kernel/Arch/aarch64/Dummy.cpp @@ -7,6 +7,7 @@ #include #include +#include #include #include #include @@ -26,13 +27,13 @@ void microseconds_delay(u32) // Initializer.cpp namespace Kernel::PCI { -bool g_pci_access_io_probe_failed { false }; -bool g_pci_access_is_disabled_from_commandline { true }; +SetOnce g_pci_access_io_probe_failed; +SetOnce g_pci_access_is_disabled_from_commandline; void initialize() { dbgln("PCI: FIXME: Enable PCI for aarch64 platforms"); - g_pci_access_io_probe_failed = true; + g_pci_access_io_probe_failed.set(); } } diff --git a/Kernel/Arch/aarch64/Processor.cpp b/Kernel/Arch/aarch64/Processor.cpp index 23d8968873..1890317fa4 100644 --- a/Kernel/Arch/aarch64/Processor.cpp +++ b/Kernel/Arch/aarch64/Processor.cpp @@ -157,7 +157,7 @@ void ProcessorBase::initialize_context_switching(Thread& initial_thread) { VERIFY(initial_thread.process().is_kernel_process()); - m_scheduler_initialized = true; + m_scheduler_initialized.set(); // FIXME: Figure out if we need to call {pre_,post_,}init_finished once aarch64 supports SMP Processor::set_current_in_scheduler(true); diff --git a/Kernel/Arch/init.cpp b/Kernel/Arch/init.cpp index 5ee30b0617..d6e3aabb5f 100644 --- a/Kernel/Arch/init.cpp +++ b/Kernel/Arch/init.cpp @@ -5,6 +5,7 @@ */ #include +#include #include #include #include @@ -100,7 +101,7 @@ extern "C" u8 end_of_kernel_image[]; multiboot_module_entry_t multiboot_copy_boot_modules_array[16]; size_t multiboot_copy_boot_modules_count; -READONLY_AFTER_INIT bool g_in_early_boot; +READONLY_AFTER_INIT SetOnce g_not_in_early_boot; namespace Kernel { @@ -168,8 +169,6 @@ READONLY_AFTER_INIT static u8 s_command_line_buffer[512]; extern "C" [[noreturn]] UNMAP_AFTER_INIT NO_SANITIZE_COVERAGE void init([[maybe_unused]] BootInfo const& boot_info) { - g_in_early_boot = true; - #if ARCH(X86_64) start_of_prekernel_image = PhysicalAddress { boot_info.start_of_prekernel_image }; end_of_prekernel_image = PhysicalAddress { boot_info.end_of_prekernel_image }; @@ -460,7 +459,7 @@ void init_stage2(void*) } // Switch out of early boot mode. - g_in_early_boot = false; + g_not_in_early_boot.set(); // NOTE: Everything marked READONLY_AFTER_INIT becomes non-writable after this point. 
MM.protect_readonly_after_init_memory(); diff --git a/Kernel/Arch/riscv64/PCI/Initializer.cpp b/Kernel/Arch/riscv64/PCI/Initializer.cpp index d1159b83ec..00711af6a3 100644 --- a/Kernel/Arch/riscv64/PCI/Initializer.cpp +++ b/Kernel/Arch/riscv64/PCI/Initializer.cpp @@ -4,6 +4,7 @@ * SPDX-License-Identifier: BSD-2-Clause */ +#include #include #include #include @@ -15,14 +16,15 @@ namespace Kernel::PCI { -bool g_pci_access_io_probe_failed { false }; -bool g_pci_access_is_disabled_from_commandline; +SetOnce g_pci_access_io_probe_failed; +SetOnce g_pci_access_is_disabled_from_commandline; void initialize() { - g_pci_access_is_disabled_from_commandline = kernel_command_line().is_pci_disabled(); - if (g_pci_access_is_disabled_from_commandline) + if (kernel_command_line().is_pci_disabled()) { + g_pci_access_is_disabled_from_commandline.set(); return; + } new Access(); diff --git a/Kernel/Arch/riscv64/Processor.cpp b/Kernel/Arch/riscv64/Processor.cpp index 16a00e6274..d9043aae69 100644 --- a/Kernel/Arch/riscv64/Processor.cpp +++ b/Kernel/Arch/riscv64/Processor.cpp @@ -187,7 +187,7 @@ void ProcessorBase::initialize_context_switching(Thread& initial_thread) { VERIFY(initial_thread.process().is_kernel_process()); - m_scheduler_initialized = true; + m_scheduler_initialized.set(); // FIXME: Figure out if we need to call {pre_,post_,}init_finished once riscv64 supports SMP Processor::set_current_in_scheduler(true); diff --git a/Kernel/Arch/x86_64/Firmware/PCBIOS/SysFSDirectory.cpp b/Kernel/Arch/x86_64/Firmware/PCBIOS/SysFSDirectory.cpp index d661ce3014..40987edb71 100644 --- a/Kernel/Arch/x86_64/Firmware/PCBIOS/SysFSDirectory.cpp +++ b/Kernel/Arch/x86_64/Firmware/PCBIOS/SysFSDirectory.cpp @@ -68,7 +68,7 @@ void SysFSBIOSDirectory::create_components() UNMAP_AFTER_INIT void SysFSBIOSDirectory::initialize_dmi_exposer() { VERIFY(!(m_dmi_entry_point.is_null())); - if (m_using_64bit_dmi_entry_point) { + if (m_using_64bit_dmi_entry_point.was_set()) { set_dmi_64_bit_entry_initialization_values(); } else { set_dmi_32_bit_entry_initialization_values(); @@ -87,7 +87,7 @@ UNMAP_AFTER_INIT SysFSBIOSDirectory::SysFSBIOSDirectory(SysFSFirmwareDirectory& auto entry_64bit = find_dmi_entry64bit_point(); if (entry_64bit.has_value()) { m_dmi_entry_point = entry_64bit.value(); - m_using_64bit_dmi_entry_point = true; + m_using_64bit_dmi_entry_point.set(); } if (m_dmi_entry_point.is_null()) return; diff --git a/Kernel/Arch/x86_64/Firmware/PCBIOS/SysFSDirectory.h b/Kernel/Arch/x86_64/Firmware/PCBIOS/SysFSDirectory.h index a401ca34bd..63b112f6d8 100644 --- a/Kernel/Arch/x86_64/Firmware/PCBIOS/SysFSDirectory.h +++ b/Kernel/Arch/x86_64/Firmware/PCBIOS/SysFSDirectory.h @@ -7,6 +7,7 @@ #pragma once #include +#include #include #include #include @@ -34,7 +35,7 @@ private: PhysicalAddress m_dmi_entry_point; PhysicalAddress m_smbios_structure_table; - bool m_using_64bit_dmi_entry_point { false }; + SetOnce m_using_64bit_dmi_entry_point; size_t m_smbios_structure_table_length { 0 }; size_t m_dmi_entry_point_length { 0 }; }; diff --git a/Kernel/Arch/x86_64/Interrupts/APIC.cpp b/Kernel/Arch/x86_64/Interrupts/APIC.cpp index 720850898e..3d6682db31 100644 --- a/Kernel/Arch/x86_64/Interrupts/APIC.cpp +++ b/Kernel/Arch/x86_64/Interrupts/APIC.cpp @@ -147,14 +147,14 @@ void APIC::set_base(PhysicalAddress const& base) { MSR msr(APIC_BASE_MSR); u64 flags = 1 << 11; - if (m_is_x2) + if (m_is_x2.was_set()) flags |= 1 << 10; msr.set(base.get() | flags); } void APIC::write_register(u32 offset, u32 value) { - if (m_is_x2) { + if (m_is_x2.was_set()) { MSR 
msr(APIC_REGS_MSR_BASE + (offset >> 4)); msr.set(value); } else { @@ -164,7 +164,7 @@ void APIC::write_register(u32 offset, u32 value) u32 APIC::read_register(u32 offset) { - if (m_is_x2) { + if (m_is_x2.was_set()) { MSR msr(APIC_REGS_MSR_BASE + (offset >> 4)); return (u32)msr.get(); } @@ -190,7 +190,7 @@ void APIC::wait_for_pending_icr() void APIC::write_icr(ICRReg const& icr) { - if (m_is_x2) { + if (m_is_x2.was_set()) { MSR msr(APIC_REGS_MSR_BASE + (APIC_REG_ICR_LOW >> 4)); msr.set(icr.x2_value()); } else { @@ -247,13 +247,13 @@ UNMAP_AFTER_INIT bool APIC::init_bsp() if ((id.edx() & (1 << 9)) == 0) return false; if (id.ecx() & (1 << 21)) - m_is_x2 = true; + m_is_x2.set(); PhysicalAddress apic_base = get_base(); - dbgln_if(APIC_DEBUG, "Initializing {}APIC, base: {}", m_is_x2 ? "x2" : "x", apic_base); + dbgln_if(APIC_DEBUG, "Initializing {}APIC, base: {}", m_is_x2.was_set() ? "x2" : "x", apic_base); set_base(apic_base); - if (!m_is_x2) { + if (!m_is_x2.was_set()) { auto region_or_error = MM.allocate_kernel_region(apic_base.page_base(), PAGE_SIZE, {}, Memory::Region::Access::ReadWrite); if (region_or_error.is_error()) { dbgln("APIC: Failed to allocate memory for APIC base"); @@ -463,10 +463,10 @@ UNMAP_AFTER_INIT void APIC::boot_aps() UNMAP_AFTER_INIT void APIC::enable(u32 cpu) { - VERIFY(m_is_x2 || cpu < 8); + VERIFY(m_is_x2.was_set() || cpu < 8); u32 apic_id; - if (m_is_x2) { + if (m_is_x2.was_set()) { dbgln_if(APIC_DEBUG, "Enable x2APIC on CPU #{}", cpu); // We need to enable x2 mode on each core independently @@ -498,7 +498,7 @@ UNMAP_AFTER_INIT void APIC::enable(u32 cpu) APICIPIInterruptHandler::initialize(IRQ_APIC_IPI); } - if (!m_is_x2) { + if (!m_is_x2.was_set()) { // local destination mode (flat mode), not supported in x2 mode write_register(APIC_REG_DF, 0xf0000000); } @@ -566,12 +566,12 @@ void APIC::send_ipi(u32 cpu) VERIFY(cpu != Processor::current_id()); VERIFY(cpu < Processor::count()); wait_for_pending_icr(); - write_icr({ IRQ_APIC_IPI + IRQ_VECTOR_BASE, m_is_x2 ? Processor::by_id(cpu).info().apic_id() : cpu, ICRReg::Fixed, m_is_x2 ? ICRReg::Physical : ICRReg::Logical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::NoShorthand }); + write_icr({ IRQ_APIC_IPI + IRQ_VECTOR_BASE, m_is_x2.was_set() ? Processor::by_id(cpu).info().apic_id() : cpu, ICRReg::Fixed, m_is_x2.was_set() ? ICRReg::Physical : ICRReg::Logical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::NoShorthand }); } UNMAP_AFTER_INIT APICTimer* APIC::initialize_timers(HardwareTimerBase& calibration_timer) { - if (!m_apic_base && !m_is_x2) + if (!m_apic_base && !m_is_x2.was_set()) return nullptr; // We should only initialize and calibrate the APIC timer once on the BSP! 
diff --git a/Kernel/Arch/x86_64/Interrupts/APIC.h b/Kernel/Arch/x86_64/Interrupts/APIC.h index f64bca7844..fbfe095306 100644 --- a/Kernel/Arch/x86_64/Interrupts/APIC.h +++ b/Kernel/Arch/x86_64/Interrupts/APIC.h @@ -6,6 +6,7 @@ #pragma once +#include #include #include #include @@ -99,7 +100,7 @@ private: u32 m_processor_cnt { 0 }; u32 m_processor_enabled_cnt { 0 }; APICTimer* m_apic_timer { nullptr }; - bool m_is_x2 { false }; + SetOnce m_is_x2; static PhysicalAddress get_base(); void set_base(PhysicalAddress const& base); diff --git a/Kernel/Arch/x86_64/PCI/Initializer.cpp b/Kernel/Arch/x86_64/PCI/Initializer.cpp index be0bdab613..08ea2a6a88 100644 --- a/Kernel/Arch/x86_64/PCI/Initializer.cpp +++ b/Kernel/Arch/x86_64/PCI/Initializer.cpp @@ -4,6 +4,7 @@ * SPDX-License-Identifier: BSD-2-Clause */ +#include #include #include #include @@ -17,8 +18,8 @@ namespace Kernel::PCI { -READONLY_AFTER_INIT bool g_pci_access_io_probe_failed; -READONLY_AFTER_INIT bool g_pci_access_is_disabled_from_commandline; +READONLY_AFTER_INIT SetOnce g_pci_access_io_probe_failed; +READONLY_AFTER_INIT SetOnce g_pci_access_is_disabled_from_commandline; static bool test_pci_io(); @@ -31,7 +32,7 @@ UNMAP_AFTER_INIT static PCIAccessLevel detect_optimal_access_type() if (boot_determined != PCIAccessLevel::IOAddressing) return boot_determined; - if (!g_pci_access_io_probe_failed) + if (!g_pci_access_io_probe_failed.was_set()) return PCIAccessLevel::IOAddressing; PANIC("No PCI bus access method detected!"); @@ -39,7 +40,9 @@ UNMAP_AFTER_INIT static PCIAccessLevel detect_optimal_access_type() UNMAP_AFTER_INIT void initialize() { - g_pci_access_is_disabled_from_commandline = kernel_command_line().is_pci_disabled(); + if (kernel_command_line().is_pci_disabled()) + g_pci_access_is_disabled_from_commandline.set(); + Optional possible_mcfg; // FIXME: There are other arch-specific methods to find the memory range // for accessing the PCI configuration space. @@ -47,11 +50,13 @@ UNMAP_AFTER_INIT void initialize() // parse it to find a PCI host bridge. if (ACPI::is_enabled()) { possible_mcfg = ACPI::Parser::the()->find_table("MCFG"sv); - g_pci_access_io_probe_failed = (!test_pci_io()) && (!possible_mcfg.has_value()); + if ((!test_pci_io()) && (!possible_mcfg.has_value())) + g_pci_access_io_probe_failed.set(); } else { - g_pci_access_io_probe_failed = !test_pci_io(); + if (!test_pci_io()) + g_pci_access_io_probe_failed.set(); } - if (g_pci_access_is_disabled_from_commandline || g_pci_access_io_probe_failed) + if (g_pci_access_is_disabled_from_commandline.was_set() || g_pci_access_io_probe_failed.was_set()) return; switch (detect_optimal_access_type()) { case PCIAccessLevel::MemoryAddressing: { diff --git a/Kernel/Arch/x86_64/Processor.cpp b/Kernel/Arch/x86_64/Processor.cpp index 5da7490076..8620c6e62d 100644 --- a/Kernel/Arch/x86_64/Processor.cpp +++ b/Kernel/Arch/x86_64/Processor.cpp @@ -455,8 +455,6 @@ UNMAP_AFTER_INIT void Processor::cpu_detect() } } - m_has_qemu_hvf_quirk = false; - if (max_extended_leaf >= 0x80000008) { // CPUID.80000008H:EAX[7:0] reports the physical-address width supported by the processor. 
CPUID cpuid(0x80000008); @@ -478,7 +476,7 @@ UNMAP_AFTER_INIT void Processor::cpu_detect() if (has_feature(CPUFeature::HYPERVISOR)) { CPUID hypervisor_leaf_range(0x40000000); if (!hypervisor_leaf_range.ebx() && m_physical_address_bit_width == 36) { - m_has_qemu_hvf_quirk = true; + m_has_qemu_hvf_quirk.set(); m_virtual_address_bit_width = 48; } } @@ -602,7 +600,6 @@ UNMAP_AFTER_INIT void ProcessorBase::early_initialize(u32 cpu) m_in_critical = 0; m_invoke_scheduler_async = false; - m_scheduler_initialized = false; m_in_scheduler = true; self->m_message_queue = nullptr; @@ -642,7 +639,7 @@ UNMAP_AFTER_INIT void ProcessorBase::initialize(u32 cpu) dmesgln("CPU[{}]: No RDRAND support detected, randomness will be poor", current_id()); dmesgln("CPU[{}]: Physical address bit width: {}", current_id(), m_physical_address_bit_width); dmesgln("CPU[{}]: Virtual address bit width: {}", current_id(), m_virtual_address_bit_width); - if (self->m_has_qemu_hvf_quirk) + if (self->m_has_qemu_hvf_quirk.was_set()) dmesgln("CPU[{}]: Applied correction for QEMU Hypervisor.framework quirk", current_id()); if (cpu == 0) @@ -1688,7 +1685,7 @@ UNMAP_AFTER_INIT void ProcessorBase::initialize_context_switching(Thread& ini self->m_tss.rsp0l = regs.rsp0 & 0xffffffff; self->m_tss.rsp0h = regs.rsp0 >> 32; - m_scheduler_initialized = true; + m_scheduler_initialized.set(); // clang-format off asm volatile( diff --git a/Kernel/Arch/x86_64/Processor.h b/Kernel/Arch/x86_64/Processor.h index 3781265b15..66c15e0508 100644 --- a/Kernel/Arch/x86_64/Processor.h +++ b/Kernel/Arch/x86_64/Processor.h @@ -9,6 +9,7 @@ #include #include #include +#include #include #include @@ -72,7 +73,7 @@ private: static Atomic s_idle_cpu_mask; TSS m_tss; - bool m_has_qemu_hvf_quirk; + SetOnce m_has_qemu_hvf_quirk; ProcessorInfo* m_info; diff --git a/Kernel/Arch/x86_64/VGA/IOArbiter.cpp b/Kernel/Arch/x86_64/VGA/IOArbiter.cpp index 8d3e632f59..9561c5fdf1 100644 --- a/Kernel/Arch/x86_64/VGA/IOArbiter.cpp +++ b/Kernel/Arch/x86_64/VGA/IOArbiter.cpp @@ -28,7 +28,7 @@ void VGAIOArbiter::disable_vga_emulation_access_permanently(Badge) @@ -39,7 +39,7 @@ void VGAIOArbiter::enable_vga_text_mode_console_cursor(Badge void VGAIOArbiter::enable_vga_text_mode_console_cursor() { SpinlockLocker locker(m_main_vga_lock); - if (m_vga_access_is_disabled) + if (m_vga_access_is_disabled.was_set()) return; IO::out8(0x3D4, 0xA); IO::out8(0x3D5, 0); @@ -53,7 +53,7 @@ void VGAIOArbiter::disable_vga_text_mode_console_cursor(Badge) { SpinlockLocker locker(m_main_vga_lock); - if (m_vga_access_is_disabled) + if (m_vga_access_is_disabled.was_set()) return; IO::out8(0x3c0, 0x20); } @@ -70,7 +70,7 @@ void VGAIOArbiter::unblank_screen(Badge) void VGAIOArbiter::set_vga_text_mode_cursor(Badge, size_t console_width, size_t x, size_t y) { SpinlockLocker locker(m_main_vga_lock); - if (m_vga_access_is_disabled) + if (m_vga_access_is_disabled.was_set()) return; enable_vga_text_mode_console_cursor(); u16 value = y * console_width + x; diff --git a/Kernel/Arch/x86_64/VGA/IOArbiter.h b/Kernel/Arch/x86_64/VGA/IOArbiter.h index 70bcf4dc78..1f1d9a7c15 100644 --- a/Kernel/Arch/x86_64/VGA/IOArbiter.h +++ b/Kernel/Arch/x86_64/VGA/IOArbiter.h @@ -8,6 +8,7 @@ #include #include +#include #include #include @@ -34,7 +35,7 @@ private: void enable_vga_text_mode_console_cursor(); RecursiveSpinlock m_main_vga_lock {}; - bool m_vga_access_is_disabled { false }; + SetOnce m_vga_access_is_disabled; }; } diff --git a/Kernel/Bus/PCI/Access.cpp b/Kernel/Bus/PCI/Access.cpp index be6428fcb1..3e757e9e5e 100644 --- 
a/Kernel/Bus/PCI/Access.cpp +++ b/Kernel/Bus/PCI/Access.cpp @@ -41,12 +41,12 @@ bool Access::is_initialized() bool Access::is_hardware_disabled() { - return g_pci_access_io_probe_failed; + return g_pci_access_io_probe_failed.was_set(); } bool Access::is_disabled() { - return g_pci_access_is_disabled_from_commandline || g_pci_access_io_probe_failed; + return g_pci_access_is_disabled_from_commandline.was_set() || g_pci_access_io_probe_failed.was_set(); } UNMAP_AFTER_INIT bool Access::find_and_register_pci_host_bridges_from_acpi_mcfg_table(PhysicalAddress mcfg_table) diff --git a/Kernel/Bus/PCI/Initializer.h b/Kernel/Bus/PCI/Initializer.h index 4871a99d6c..919a61744e 100644 --- a/Kernel/Bus/PCI/Initializer.h +++ b/Kernel/Bus/PCI/Initializer.h @@ -6,10 +6,12 @@ #pragma once +#include + namespace Kernel::PCI { -extern bool g_pci_access_io_probe_failed; -extern bool g_pci_access_is_disabled_from_commandline; +extern SetOnce g_pci_access_io_probe_failed; +extern SetOnce g_pci_access_is_disabled_from_commandline; void initialize(); diff --git a/Kernel/Bus/VirtIO/Device.cpp b/Kernel/Bus/VirtIO/Device.cpp index 34a7219fed..bbbb30cc47 100644 --- a/Kernel/Bus/VirtIO/Device.cpp +++ b/Kernel/Bus/VirtIO/Device.cpp @@ -40,8 +40,8 @@ void Device::set_status_bit(u8 status_bit) ErrorOr Device::accept_device_features(u64 device_features, u64 accepted_features) { - VERIFY(!m_did_accept_features); - m_did_accept_features = true; + VERIFY(!m_did_accept_features.was_set()); + m_did_accept_features.set(); if (is_feature_set(device_features, VIRTIO_F_VERSION_1)) { accepted_features |= VIRTIO_F_VERSION_1; // let the device know were not a legacy driver @@ -89,8 +89,8 @@ ErrorOr Device::setup_queue(u16 queue_index) ErrorOr Device::setup_queues(u16 requested_queue_count) { - VERIFY(!m_did_setup_queues); - m_did_setup_queues = true; + VERIFY(!m_did_setup_queues.was_set()); + m_did_setup_queues.set(); auto* common_cfg = TRY(m_transport_entity->get_config(ConfigurationType::Common)); if (common_cfg) { @@ -120,8 +120,8 @@ ErrorOr Device::setup_queues(u16 requested_queue_count) void Device::finish_init() { - VERIFY(m_did_accept_features); // ensure features were negotiated - VERIFY(m_did_setup_queues); // ensure queues were set-up + VERIFY(m_did_accept_features.was_set()); // ensure features were negotiated + VERIFY(m_did_setup_queues.was_set()); // ensure queues were set-up VERIFY(!(m_status & DEVICE_STATUS_DRIVER_OK)); // ensure we didn't already finish the initialization set_status_bit(DEVICE_STATUS_DRIVER_OK); diff --git a/Kernel/Bus/VirtIO/Device.h b/Kernel/Bus/VirtIO/Device.h index b2138e56f8..8bb6427c71 100644 --- a/Kernel/Bus/VirtIO/Device.h +++ b/Kernel/Bus/VirtIO/Device.h @@ -6,6 +6,7 @@ #pragma once +#include #include #include #include @@ -66,7 +67,7 @@ protected: } bool is_feature_accepted(u64 feature) const { - VERIFY(m_did_accept_features); + VERIFY(m_did_accept_features.was_set()); return is_feature_set(m_accepted_features, feature); } @@ -91,8 +92,8 @@ private: u16 m_queue_count { 0 }; u8 m_status { 0 }; u64 m_accepted_features { 0 }; - bool m_did_accept_features { false }; - bool m_did_setup_queues { false }; + SetOnce m_did_accept_features; + SetOnce m_did_setup_queues; NonnullOwnPtr const m_transport_entity; }; diff --git a/Kernel/Bus/VirtIO/Transport/Entity.cpp b/Kernel/Bus/VirtIO/Transport/Entity.cpp index 50097aa456..322d7ca9ac 100644 --- a/Kernel/Bus/VirtIO/Transport/Entity.cpp +++ b/Kernel/Bus/VirtIO/Transport/Entity.cpp @@ -10,7 +10,7 @@ namespace Kernel::VirtIO { auto 
TransportEntity::mapping_for_resource_index(u8 resource_index) -> IOWindow& { - VERIFY(m_use_mmio); + VERIFY(m_use_mmio.was_set()); VERIFY(m_register_bases[resource_index]); return *m_register_bases[resource_index]; } diff --git a/Kernel/Bus/VirtIO/Transport/Entity.h b/Kernel/Bus/VirtIO/Transport/Entity.h index fb7fff7d04..7204e910b8 100644 --- a/Kernel/Bus/VirtIO/Transport/Entity.h +++ b/Kernel/Bus/VirtIO/Transport/Entity.h @@ -6,6 +6,7 @@ #pragma once +#include #include #include #include @@ -90,7 +91,7 @@ protected: IOWindow& base_io_window(); Array, 6> m_register_bases; - bool m_use_mmio { false }; + SetOnce m_use_mmio; u32 m_notify_multiplier { 0 }; }; diff --git a/Kernel/Bus/VirtIO/Transport/PCIe/TransportLink.cpp b/Kernel/Bus/VirtIO/Transport/PCIe/TransportLink.cpp index 6d2d73b323..dcf1bf65a3 100644 --- a/Kernel/Bus/VirtIO/Transport/PCIe/TransportLink.cpp +++ b/Kernel/Bus/VirtIO/Transport/PCIe/TransportLink.cpp @@ -123,7 +123,7 @@ ErrorOr PCIeTransportLink::locate_configurations_and_resources(Badge PCIeTransportLink::locate_configurations_and_resources(Badge(cfg.resource_index))); m_register_bases[cfg.resource_index] = move(mapping_io_window); diff --git a/Kernel/Heap/kmalloc.cpp b/Kernel/Heap/kmalloc.cpp index 89d9a5e3f1..76c6665873 100644 --- a/Kernel/Heap/kmalloc.cpp +++ b/Kernel/Heap/kmalloc.cpp @@ -452,7 +452,7 @@ static void* kmalloc_impl(size_t size, size_t alignment, CallerWillInitializeMem SpinlockLocker lock(s_lock); ++g_kmalloc_call_count; - if (g_dump_kmalloc_stacks && Kernel::g_kernel_symbols_available) { + if (g_dump_kmalloc_stacks && Kernel::g_kernel_symbols_available.was_set()) { dbgln("kmalloc({})", size); Kernel::dump_backtrace(); } diff --git a/Kernel/KSyms.cpp b/Kernel/KSyms.cpp index 7ed4df99c7..20ef2ba5b4 100644 --- a/Kernel/KSyms.cpp +++ b/Kernel/KSyms.cpp @@ -17,7 +17,7 @@ namespace Kernel { FlatPtr g_lowest_kernel_symbol_address = 0xffffffff; FlatPtr g_highest_kernel_symbol_address = 0; -bool g_kernel_symbols_available = false; +SetOnce g_kernel_symbols_available; extern "C" { __attribute__((section(".kernel_symbols"))) char kernel_symbols[5 * MiB] {}; @@ -107,7 +107,7 @@ UNMAP_AFTER_INIT static void load_kernel_symbols_from_data(Bytes buffer) ++bufptr; ++current_symbol_index; } - g_kernel_symbols_available = true; + g_kernel_symbols_available.set(); } NEVER_INLINE static void dump_backtrace_impl(FlatPtr frame_pointer, bool use_ksyms, PrintToScreen print_to_screen) @@ -121,7 +121,7 @@ NEVER_INLINE static void dump_backtrace_impl(FlatPtr frame_pointer, bool use_ksy } while (0) SmapDisabler disabler; - if (use_ksyms && !g_kernel_symbols_available) + if (use_ksyms && !g_kernel_symbols_available.was_set()) Processor::halt(); struct RecognizedSymbol { @@ -235,7 +235,7 @@ void dump_backtrace(PrintToScreen print_to_screen) TemporaryChange disable_kmalloc_stacks(g_dump_kmalloc_stacks, false); FlatPtr base_pointer = (FlatPtr)__builtin_frame_address(0); - dump_backtrace_impl(base_pointer, g_kernel_symbols_available, print_to_screen); + dump_backtrace_impl(base_pointer, g_kernel_symbols_available.was_set(), print_to_screen); } UNMAP_AFTER_INIT void load_kernel_symbol_table() diff --git a/Kernel/KSyms.h b/Kernel/KSyms.h index 518a9f2984..da5f8614f0 100644 --- a/Kernel/KSyms.h +++ b/Kernel/KSyms.h @@ -7,6 +7,7 @@ #pragma once #include +#include namespace Kernel { @@ -24,7 +25,7 @@ FlatPtr address_for_kernel_symbol(StringView name); KernelSymbol const* symbolicate_kernel_address(FlatPtr); void load_kernel_symbol_table(); -extern bool g_kernel_symbols_available; +extern 
SetOnce g_kernel_symbols_available; extern FlatPtr g_lowest_kernel_symbol_address; extern FlatPtr g_highest_kernel_symbol_address; diff --git a/Kernel/Library/KString.cpp b/Kernel/Library/KString.cpp index 8469de75a1..9435782ee0 100644 --- a/Kernel/Library/KString.cpp +++ b/Kernel/Library/KString.cpp @@ -5,10 +5,11 @@ */ #include +#include #include #include -extern bool g_in_early_boot; +extern SetOnce g_not_in_early_boot; namespace Kernel { @@ -33,7 +34,7 @@ ErrorOr> KString::vformatted(StringView fmtstr, AK::TypeE NonnullOwnPtr KString::must_create(StringView string) { // We can only enforce success during early boot. - VERIFY(g_in_early_boot); + VERIFY(!g_not_in_early_boot.was_set()); return KString::try_create(string).release_value(); } @@ -51,7 +52,7 @@ ErrorOr> KString::try_create_uninitialized(size_t length, NonnullOwnPtr KString::must_create_uninitialized(size_t length, char*& characters) { // We can only enforce success during early boot. - VERIFY(g_in_early_boot); + VERIFY(!g_not_in_early_boot.was_set()); return KString::try_create_uninitialized(length, characters).release_value(); } diff --git a/Kernel/Locking/Mutex.cpp b/Kernel/Locking/Mutex.cpp index c2836ac977..f6952eb62d 100644 --- a/Kernel/Locking/Mutex.cpp +++ b/Kernel/Locking/Mutex.cpp @@ -5,6 +5,7 @@ * SPDX-License-Identifier: BSD-2-Clause */ +#include #include #include #include @@ -12,7 +13,7 @@ #include #include -extern bool g_in_early_boot; +extern SetOnce g_not_in_early_boot; namespace Kernel { @@ -23,7 +24,7 @@ void Mutex::lock(Mode mode, [[maybe_unused]] LockLocation const& location) VERIFY(!Processor::current_in_irq()); if constexpr (LOCK_IN_CRITICAL_DEBUG) { // There are no interrupts enabled in early boot. - if (!g_in_early_boot) + if (g_not_in_early_boot.was_set()) VERIFY_INTERRUPTS_ENABLED(); } VERIFY(mode != Mode::Unlocked); @@ -151,7 +152,7 @@ void Mutex::unlock() VERIFY(!Processor::current_in_irq()); if constexpr (LOCK_IN_CRITICAL_DEBUG) { // There are no interrupts enabled in early boot. 
- if (!g_in_early_boot) + if (g_not_in_early_boot.was_set()) VERIFY_INTERRUPTS_ENABLED(); } auto* current_thread = Thread::current(); @@ -211,7 +212,7 @@ void Mutex::block(Thread& current_thread, Mode mode, SpinlockLocker #include #include +#include #include #include #include @@ -89,8 +90,8 @@ public: [[nodiscard]] bool is_stack() const { return m_stack; } void set_stack(bool stack) { m_stack = stack; } - [[nodiscard]] bool is_immutable() const { return m_immutable; } - void set_immutable() { m_immutable = true; } + [[nodiscard]] bool is_immutable() const { return m_immutable.was_set(); } + void set_immutable() { m_immutable.set(); } [[nodiscard]] bool is_mmap() const { return m_mmap; } @@ -243,12 +244,13 @@ private: bool m_cacheable : 1 { false }; bool m_stack : 1 { false }; bool m_mmap : 1 { false }; - bool m_immutable : 1 { false }; bool m_syscall_region : 1 { false }; bool m_write_combine : 1 { false }; bool m_mmapped_from_readable : 1 { false }; bool m_mmapped_from_writable : 1 { false }; + SetOnce m_immutable; + IntrusiveRedBlackTreeNode> m_tree_node; IntrusiveListNode m_vmobject_list_node; diff --git a/Kernel/Net/IPv4Socket.cpp b/Kernel/Net/IPv4Socket.cpp index ab84fd19eb..0aa88e2555 100644 --- a/Kernel/Net/IPv4Socket.cpp +++ b/Kernel/Net/IPv4Socket.cpp @@ -97,19 +97,19 @@ void IPv4Socket::get_peer_address(sockaddr* address, socklen_t* address_size) ErrorOr IPv4Socket::ensure_bound() { - dbgln_if(IPV4_SOCKET_DEBUG, "IPv4Socket::ensure_bound() m_bound {}", m_bound); - if (m_bound) + dbgln_if(IPV4_SOCKET_DEBUG, "IPv4Socket::ensure_bound() m_bound {}", m_bound.was_set()); + if (m_bound.was_set()) return {}; auto result = protocol_bind(); if (!result.is_error()) - m_bound = true; + m_bound.set(); return result; } ErrorOr IPv4Socket::bind(Credentials const& credentials, Userspace user_address, socklen_t address_size) { - if (m_bound) + if (m_bound.was_set()) return set_so_error(EINVAL); VERIFY(setup_state() == SetupState::Unstarted); diff --git a/Kernel/Net/IPv4Socket.h b/Kernel/Net/IPv4Socket.h index c6c2bf0459..9a70bcd581 100644 --- a/Kernel/Net/IPv4Socket.h +++ b/Kernel/Net/IPv4Socket.h @@ -7,6 +7,7 @@ #pragma once #include +#include #include #include #include @@ -73,7 +74,7 @@ protected: IPv4Socket(int type, int protocol, NonnullOwnPtr receive_buffer, OwnPtr optional_scratch_buffer); virtual StringView class_name() const override { return "IPv4Socket"sv; } - void set_bound(bool bound) { m_bound = bound; } + void set_bound() { m_bound.set(); } ErrorOr ensure_bound(); virtual ErrorOr protocol_bind() { return {}; } @@ -107,7 +108,7 @@ private: Vector m_multicast_memberships; bool m_multicast_loop { true }; - bool m_bound { false }; + SetOnce m_bound; struct ReceivedPacket { IPv4Address peer_address; diff --git a/Kernel/Net/Intel/E1000ENetworkAdapter.cpp b/Kernel/Net/Intel/E1000ENetworkAdapter.cpp index ca3b128747..0d7084882f 100644 --- a/Kernel/Net/Intel/E1000ENetworkAdapter.cpp +++ b/Kernel/Net/Intel/E1000ENetworkAdapter.cpp @@ -221,7 +221,7 @@ UNMAP_AFTER_INIT ErrorOr E1000ENetworkAdapter::initialize(Badge E1000NetworkAdapter::initialize(Badge +#include #include #include #include @@ -96,7 +97,7 @@ protected: NonnullOwnPtr m_tx_buffer_region; Array m_rx_buffers; Array m_tx_buffers; - bool m_has_eeprom { false }; + SetOnce m_has_eeprom; bool m_link_up { false }; EntropySource m_entropy_source; diff --git a/Kernel/Net/LocalSocket.cpp b/Kernel/Net/LocalSocket.cpp index a04fb34af1..1760d5d895 100644 --- a/Kernel/Net/LocalSocket.cpp +++ b/Kernel/Net/LocalSocket.cpp @@ -156,13 +156,13 @@ 
ErrorOr LocalSocket::bind(Credentials const& credentials, Userspace LocalSocket::connect(Credentials const& credentials, OpenFileDescription& description, Userspace user_address, socklen_t address_size) { - if (m_bound) + if (m_bound.was_set()) return set_so_error(EISCONN); if (address_size > sizeof(sockaddr_un)) @@ -260,7 +260,7 @@ void LocalSocket::detach(OpenFileDescription& description) VERIFY(m_accept_side_fd_open); m_accept_side_fd_open = false; - if (m_bound) { + if (m_bound.was_set()) { if (m_inode) m_inode->unbind_socket(); } diff --git a/Kernel/Net/LocalSocket.h b/Kernel/Net/LocalSocket.h index ca0261a268..f9325b4dc3 100644 --- a/Kernel/Net/LocalSocket.h +++ b/Kernel/Net/LocalSocket.h @@ -7,6 +7,7 @@ #pragma once #include +#include #include #include @@ -94,7 +95,7 @@ private: return m_role; } - bool m_bound { false }; + SetOnce m_bound; bool m_accept_side_fd_open { false }; OwnPtr m_path; diff --git a/Kernel/Net/TCPSocket.cpp b/Kernel/Net/TCPSocket.cpp index 15d2893d38..9e76d29eef 100644 --- a/Kernel/Net/TCPSocket.cpp +++ b/Kernel/Net/TCPSocket.cpp @@ -157,7 +157,7 @@ ErrorOr> TCPSocket::try_create_client(IPv4Address const client->set_local_port(new_local_port); client->set_peer_address(new_peer_address); client->set_peer_port(new_peer_port); - client->set_bound(true); + client->set_bound(); client->set_direction(Direction::Incoming); client->set_originator(*this); diff --git a/Kernel/SanCov.cpp b/Kernel/SanCov.cpp index 897d8ebe1b..43bfdb57ad 100644 --- a/Kernel/SanCov.cpp +++ b/Kernel/SanCov.cpp @@ -5,12 +5,13 @@ */ #include +#include #include #include #include #include -extern bool g_in_early_boot; +extern SetOnce g_not_in_early_boot; #ifdef ENABLE_KERNEL_COVERAGE_COLLECTION_DEBUG // Set kcov_emergency_off=true before making calls from __sanitizer_cov_trace_pc to coverage @@ -36,7 +37,7 @@ static void crash_and_burn(Thread* thread) extern "C" void __sanitizer_cov_trace_pc(void); extern "C" void __sanitizer_cov_trace_pc(void) { - if (g_in_early_boot) [[unlikely]] + if (!g_not_in_early_boot.was_set()) [[unlikely]] return; auto* thread = Processor::current_thread(); diff --git a/Kernel/Tasks/Process.cpp b/Kernel/Tasks/Process.cpp index b2622d4514..3baf3bcc53 100644 --- a/Kernel/Tasks/Process.cpp +++ b/Kernel/Tasks/Process.cpp @@ -519,7 +519,7 @@ void Process::crash(int signal, Optional regs, bool out_of if (out_of_memory) { dbgln("\033[31;1mOut of memory\033[m, killing: {}", *this); } else { - if (ip >= kernel_load_base && g_kernel_symbols_available) { + if (ip >= kernel_load_base && g_kernel_symbols_available.was_set()) { auto const* symbol = symbolicate_kernel_address(ip); dbgln("\033[31;1m{:p} {} +{}\033[0m\n", ip, (symbol ? symbol->name : "(k?)"), (symbol ? ip - symbol->address : 0)); } else { diff --git a/Kernel/Time/TimeManagement.cpp b/Kernel/Time/TimeManagement.cpp index 2d79dd0e8a..72838d2e2f 100644 --- a/Kernel/Time/TimeManagement.cpp +++ b/Kernel/Time/TimeManagement.cpp @@ -118,7 +118,7 @@ MonotonicTime TimeManagement::monotonic_time(TimePrecision precision) const u64 seconds; u32 ticks; - bool do_query = precision == TimePrecision::Precise && m_can_query_precise_time; + bool do_query = precision == TimePrecision::Precise && m_can_query_precise_time.was_set(); u32 update_iteration; do { @@ -380,7 +380,7 @@ UNMAP_AFTER_INIT bool TimeManagement::probe_and_set_x86_non_legacy_hardware_time // Use the HPET main counter frequency for time purposes. 
This is likely // a much higher frequency than the interrupt itself and allows us to // keep a more accurate time - m_can_query_precise_time = true; + m_can_query_precise_time.set(); m_time_ticks_per_second = HPET::the().frequency(); m_system_timer->try_to_set_frequency(m_system_timer->calculate_nearest_possible_frequency(OPTIMAL_TICKS_PER_SECOND_RATE)); diff --git a/Kernel/Time/TimeManagement.h b/Kernel/Time/TimeManagement.h index 834117617a..26d34793db 100644 --- a/Kernel/Time/TimeManagement.h +++ b/Kernel/Time/TimeManagement.h @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -72,7 +73,7 @@ public: // FIXME: Most likely broken, because it does not check m_update[12] for in-progress updates. void set_remaining_epoch_time_adjustment(Duration adjustment) { m_remaining_epoch_time_adjustment = adjustment; } - bool can_query_precise_time() const { return m_can_query_precise_time; } + bool can_query_precise_time() const { return m_can_query_precise_time.was_set(); } Memory::VMObject& time_page_vmobject(); @@ -108,7 +109,7 @@ private: Atomic m_update2 { 0 }; u32 m_time_ticks_per_second { 0 }; // may be different from interrupts/second (e.g. hpet) - bool m_can_query_precise_time { false }; + SetOnce m_can_query_precise_time; bool m_updating_time { false }; // may only be accessed from the BSP! LockRefPtr m_system_timer;