-
-
Notifications
You must be signed in to change notification settings - Fork 3.3k
Kernel: Allow acquiring the same LockRank multiple times #25932
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: master
Are you sure you want to change the base?
Changes from all commits
bfed4e8
282c38e
8a48568
1c2574b
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -12,21 +12,22 @@ | |
|
||
namespace Kernel { | ||
|
||
void track_lock_acquire(LockRank rank) | ||
bool track_lock_acquire(LockRank rank) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Please replace this bool with an enum. |
||
{ | ||
if constexpr (LOCK_RANK_ENFORCEMENT) { | ||
auto* thread = Thread::current(); | ||
if (thread && !thread->is_crashing()) | ||
thread->track_lock_acquire(rank); | ||
return thread->track_lock_acquire(rank); | ||
} | ||
return false; | ||
} | ||
|
||
void track_lock_release(LockRank rank) | ||
void track_lock_release(LockRank rank, bool change_state) | ||
{ | ||
if constexpr (LOCK_RANK_ENFORCEMENT) { | ||
auto* thread = Thread::current(); | ||
if (thread && !thread->is_crashing()) | ||
thread->track_lock_release(rank); | ||
thread->track_lock_release(rank, change_state); | ||
} | ||
} | ||
|
||
|
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. nit: commit message: "They does" should be "This does" I think. |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -34,10 +34,13 @@ enum class LockRank : int { | |
// Process locks are the highest rank, as they normally are taken | ||
// first thing when processing syscalls. | ||
Process = 0x010, | ||
|
||
// Mutexes need to be taken before spinlocks, so they get their own lock-rank | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. nit: s/lock-rank/LockRank. The hyphen looks weird. |
||
Mutex = 0x020, | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Note to self: We should reorganize this bitfield such that lowest 16 bits are reserved for spinlocks (and 0x8000 is the Process lock) and highest 16 bits are reserved for mutexes. If you want you can already make this 0x1'0000 even though the circular dependency prevents any rank differentiation on Mutex right now. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Currently Mutexes don't have any lasting effect on the lock rank, as this technically is the lock rank of the internal spinlock of the mutex... |
||
}; | ||
|
||
AK_ENUM_BITWISE_OPERATORS(LockRank); | ||
|
||
void track_lock_acquire(LockRank); | ||
void track_lock_release(LockRank); | ||
[[nodiscard]] bool track_lock_acquire(LockRank); | ||
void track_lock_release(LockRank, bool change_state); | ||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -10,11 +10,13 @@ | |
#include <AK/Assertions.h> | ||
#include <AK/Atomic.h> | ||
#include <AK/HashMap.h> | ||
#include <AK/Noncopyable.h> | ||
#include <AK/Types.h> | ||
#include <Kernel/Forward.h> | ||
#include <Kernel/Locking/LockLocation.h> | ||
#include <Kernel/Locking/LockMode.h> | ||
#include <Kernel/Tasks/WaitQueue.h> | ||
#include <Kernel/Locking/LockRank.h> | ||
#include <Kernel/Tasks/Thread.h> | ||
|
||
namespace Kernel { | ||
|
||
|
@@ -26,6 +28,11 @@ class Mutex { | |
|
||
public: | ||
using Mode = LockMode; | ||
// FIXME: Allow some customization | ||
// Note: The internal Spinlock might have a different Rank than the Mutex | ||
// Note: Making this a template would currently mess with Threads and our implementation | ||
// As we currently include Thread.h and Thread uses Mutexes in its API | ||
Comment on lines
+32
to
+34
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 1st note: Does this refer to something that should be done in the future? Because currently the spinlocks can't have a different rank. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. IIRC Mutexes are part of the out-of-line API of Thread so slapping a template on them would make that harder |
||
constexpr static LockRank Rank = LockRank::Mutex; | ||
|
||
// FIXME: remove this after annihilating Process::m_big_lock | ||
enum class MutexBehavior { | ||
|
@@ -40,6 +47,8 @@ class Mutex { | |
} | ||
~Mutex() = default; | ||
|
||
// FIXME: This should have a lasting effect on the threads lock-rank | ||
// and therefore return some sort of key for unlocking | ||
Comment on lines
+50
to
+51
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. So do you want to give Mutexes multiple lock ranks as well? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Would probably make sense in the future |
||
void lock(Mode mode = Mode::Exclusive, LockLocation const& location = LockLocation::current()); | ||
void restore_exclusive_lock(u32, LockLocation const& location = LockLocation::current()); | ||
|
||
|
@@ -83,7 +92,7 @@ class Mutex { | |
using BigLockBlockedThreadList = IntrusiveList<&Thread::m_big_lock_blocked_threads_list_node>; | ||
|
||
// FIXME: Allow any lock rank. | ||
void block(Thread&, Mode, SpinlockLocker<Spinlock<LockRank::None>>&, u32); | ||
void block(Thread&, Mode, SpinlockLocker<Spinlock<Rank>>&, u32); | ||
void unblock_waiters(Mode); | ||
|
||
StringView m_name; | ||
|
@@ -120,7 +129,7 @@ class Mutex { | |
RecursiveSpinlockProtected<BlockedThreadLists, LockRank::None> m_blocked_thread_lists {}; | ||
|
||
// FIXME: See above. | ||
mutable Spinlock<LockRank::None> m_lock {}; | ||
mutable Spinlock<Rank> m_lock {}; | ||
|
||
#if LOCK_SHARED_UPGRADE_DEBUG | ||
HashMap<uintptr_t, u32> m_shared_holders_map; | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -13,6 +13,11 @@ | |
|
||
namespace Kernel { | ||
|
||
struct [[nodiscard]] SpinLockKey { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. s/SpinLock/Spinlock |
||
InterruptsState interrupts_state { InterruptsState::Disabled }; | ||
bool affect_lock_rank { false }; | ||
}; | ||
|
||
template<LockRank Rank> | ||
class Spinlock { | ||
AK_MAKE_NONCOPYABLE(Spinlock); | ||
|
@@ -21,25 +26,24 @@ class Spinlock { | |
public: | ||
Spinlock() = default; | ||
|
||
InterruptsState lock() | ||
SpinLockKey lock() | ||
{ | ||
InterruptsState previous_interrupts_state = Processor::interrupts_state(); | ||
Processor::enter_critical(); | ||
Processor::disable_interrupts(); | ||
while (m_lock.exchange(1, AK::memory_order_acquire) != 0) | ||
Processor::wait_check(); | ||
track_lock_acquire(m_rank); | ||
return previous_interrupts_state; | ||
return { previous_interrupts_state, track_lock_acquire(m_rank) }; | ||
} | ||
|
||
void unlock(InterruptsState previous_interrupts_state) | ||
void unlock(SpinLockKey key) | ||
{ | ||
VERIFY(is_locked()); | ||
track_lock_release(m_rank); | ||
track_lock_release(m_rank, key.affect_lock_rank); | ||
m_lock.store(0, AK::memory_order_release); | ||
|
||
Processor::leave_critical(); | ||
Processor::restore_interrupts_state(previous_interrupts_state); | ||
Processor::restore_interrupts_state(key.interrupts_state); | ||
} | ||
|
||
[[nodiscard]] ALWAYS_INLINE bool is_locked() const | ||
|
@@ -52,6 +56,8 @@ class Spinlock { | |
m_lock.store(0, AK::memory_order_relaxed); | ||
} | ||
|
||
constexpr LockRank rank() { return Rank; } | ||
|
||
private: | ||
Atomic<u8> m_lock { 0 }; | ||
static constexpr LockRank const m_rank { Rank }; | ||
|
@@ -65,7 +71,7 @@ class RecursiveSpinlock { | |
public: | ||
RecursiveSpinlock() = default; | ||
|
||
InterruptsState lock() | ||
SpinLockKey lock() | ||
{ | ||
InterruptsState previous_interrupts_state = Processor::interrupts_state(); | ||
Processor::disable_interrupts(); | ||
|
@@ -79,24 +85,25 @@ class RecursiveSpinlock { | |
Processor::wait_check(); | ||
expected = 0; | ||
} | ||
bool did_affect_lock_rank = false; | ||
if (m_recursions == 0) | ||
track_lock_acquire(m_rank); | ||
did_affect_lock_rank = track_lock_acquire(m_rank); | ||
m_recursions++; | ||
return previous_interrupts_state; | ||
return { previous_interrupts_state, did_affect_lock_rank }; | ||
} | ||
|
||
void unlock(InterruptsState previous_interrupts_state) | ||
void unlock(SpinLockKey key) | ||
{ | ||
VERIFY_INTERRUPTS_DISABLED(); | ||
VERIFY(m_recursions > 0); | ||
VERIFY(m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current())); | ||
if (--m_recursions == 0) { | ||
track_lock_release(m_rank); | ||
track_lock_release(m_rank, key.affect_lock_rank); | ||
m_lock.store(0, AK::memory_order_release); | ||
} | ||
|
||
Processor::leave_critical(); | ||
Processor::restore_interrupts_state(previous_interrupts_state); | ||
Processor::restore_interrupts_state(key.interrupts_state); | ||
} | ||
|
||
[[nodiscard]] ALWAYS_INLINE bool is_locked() const | ||
|
@@ -114,6 +121,8 @@ class RecursiveSpinlock { | |
m_lock.store(0, AK::memory_order_relaxed); | ||
} | ||
|
||
constexpr LockRank rank() { return Rank; } | ||
|
||
private: | ||
Atomic<FlatPtr> m_lock { 0 }; | ||
u32 m_recursions { 0 }; | ||
|
@@ -132,41 +141,41 @@ class [[nodiscard]] SpinlockLocker { | |
: m_lock(&lock) | ||
{ | ||
VERIFY(m_lock); | ||
m_previous_interrupts_state = m_lock->lock(); | ||
m_key = m_lock->lock(); | ||
m_have_lock = true; | ||
} | ||
|
||
SpinlockLocker(SpinlockLocker&& from) | ||
: m_lock(from.m_lock) | ||
, m_previous_interrupts_state(from.m_previous_interrupts_state) | ||
, m_key(from.m_key) | ||
, m_have_lock(from.m_have_lock) | ||
{ | ||
from.m_lock = nullptr; | ||
from.m_previous_interrupts_state = InterruptsState::Disabled; | ||
from.m_key = {}; | ||
from.m_have_lock = false; | ||
} | ||
|
||
~SpinlockLocker() | ||
{ | ||
if (m_lock && m_have_lock) { | ||
m_lock->unlock(m_previous_interrupts_state); | ||
m_lock->unlock(m_key); | ||
} | ||
} | ||
|
||
ALWAYS_INLINE void lock() | ||
{ | ||
VERIFY(m_lock); | ||
VERIFY(!m_have_lock); | ||
m_previous_interrupts_state = m_lock->lock(); | ||
m_key = m_lock->lock(); | ||
m_have_lock = true; | ||
} | ||
|
||
ALWAYS_INLINE void unlock() | ||
{ | ||
VERIFY(m_lock); | ||
VERIFY(m_have_lock); | ||
m_lock->unlock(m_previous_interrupts_state); | ||
m_previous_interrupts_state = InterruptsState::Disabled; | ||
m_lock->unlock(m_key); | ||
m_key = {}; | ||
m_have_lock = false; | ||
} | ||
|
||
|
@@ -177,7 +186,7 @@ class [[nodiscard]] SpinlockLocker { | |
|
||
private: | ||
LockType* m_lock { nullptr }; | ||
InterruptsState m_previous_interrupts_state { InterruptsState::Disabled }; | ||
SpinLockKey m_key {}; | ||
bool m_have_lock { false }; | ||
}; | ||
|
||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -997,7 +997,12 @@ ErrorOr<FlatPtr> Process::sys$execve(Userspace<Syscall::SC_execve_params const*> | |
// thread. We should also still be in our critical section | ||
VERIFY(!g_scheduler_lock.is_locked_by_current_processor()); | ||
VERIFY(Processor::in_critical() == 1); | ||
g_scheduler_lock.lock(); | ||
|
||
auto key = g_scheduler_lock.lock(); | ||
VERIFY(key.interrupts_state == InterruptsState::Disabled); | ||
// FIXME: Remove the None check | ||
VERIFY(key.affect_lock_rank || g_scheduler_lock.rank() == LockRank::None); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. What is the There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. good question, I don't remember that anymore, will check that again |
||
|
||
current_thread->set_state(Thread::State::Running); | ||
Processor::assume_context(*current_thread, previous_interrupts_state); | ||
VERIFY_NOT_REACHED(); | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -184,7 +184,10 @@ UNMAP_AFTER_INIT void Scheduler::start() | |
|
||
// We need to acquire our scheduler lock, which will be released | ||
// by the idle thread once control transferred there | ||
g_scheduler_lock.lock(); | ||
auto key = g_scheduler_lock.lock(); | ||
// FIXME: InterruptsEnabled check | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Why can't you check if interrupts were disabled here? |
||
// FIXME: Remove the None check | ||
VERIFY(key.affect_lock_rank || g_scheduler_lock.rank() == LockRank::None); | ||
|
||
auto& processor = Processor::current(); | ||
VERIFY(processor.is_initialized()); | ||
|
@@ -336,7 +339,7 @@ void Scheduler::leave_on_first_switch(InterruptsState previous_interrupts_state) | |
// At this point, enter_current has already be called, but because | ||
// Scheduler::context_switch is not in the call stack we need to | ||
// clean up and release locks manually here | ||
g_scheduler_lock.unlock(previous_interrupts_state); | ||
g_scheduler_lock.unlock({ .interrupts_state = previous_interrupts_state, .affect_lock_rank = true }); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Are we guaranteed to always be on a different lock rank here? |
||
|
||
VERIFY(Processor::current_in_scheduler()); | ||
Processor::set_current_in_scheduler(false); | ||
|
@@ -357,7 +360,10 @@ void Scheduler::prepare_for_idle_loop() | |
// This is called when the CPU finished setting up the idle loop | ||
// and is about to run it. We need to acquire the scheduler lock | ||
VERIFY(!g_scheduler_lock.is_locked_by_current_processor()); | ||
g_scheduler_lock.lock(); | ||
auto key = g_scheduler_lock.lock(); | ||
// FIXME: InterruptsEnabled check | ||
// FIXME: Remove the None check | ||
VERIFY(key.affect_lock_rank || g_scheduler_lock.rank() == LockRank::None); | ||
|
||
VERIFY(!Processor::current_in_scheduler()); | ||
Processor::set_current_in_scheduler(true); | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I guess this is fine because swap only modifies the pointer and not the pointed-to object (so the caller still sees the original custody afterwards)? I still think this is kind of awkward.
Can't we add a temporary Custody at the start of this function to extended its lifetime and move it into there?