Convert ThreadPlanStack's mutex to a shared mutex. #116438

Merged: 3 commits, Nov 18, 2024
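
In outline: the stack's `std::recursive_mutex` becomes an `llvm::sys::RWMutex`, so read-only queries (GetCurrentPlan, AnyPlans, the dump/print paths) can run concurrently under a shared reader lock, while mutations (PushPlan, PopPlan, the Discard family) take an exclusive writer lock. Because `RWMutex` is not recursive, the public entry points now lock once and delegate to new private `*NoLock` helpers. A minimal sketch of that pattern, assuming nothing beyond `llvm/Support/RWMutex.h` (the `Stack`/`Push`/`Top` names are illustrative, not LLDB API):

```cpp
#include <vector>

#include "llvm/Support/RWMutex.h"

// Illustrative only -- not LLDB code. Readers share the lock; writers
// exclude everyone; internal calls use *NoLock helpers because
// llvm::sys::RWMutex, unlike std::recursive_mutex, must not be re-acquired
// by the thread that already holds it.
class Stack {
public:
  int Top() const {
    llvm::sys::ScopedReader guard(m_mutex); // shared: readers run concurrently
    return TopNoLock();
  }

  void Push(int v) {
    llvm::sys::ScopedWriter guard(m_mutex); // exclusive: writers serialized
    m_items.push_back(v);
  }

  void PopAll() {
    llvm::sys::ScopedWriter guard(m_mutex);
    while (m_items.size() > 1)
      PopNoLock(); // calling a locking Pop() here would self-deadlock
  }

private:
  int TopNoLock() const { return m_items.back(); }
  void PopNoLock() { m_items.pop_back(); }

  std::vector<int> m_items = {0}; // element 0 stands in for the "base plan"
  mutable llvm::sys::RWMutex m_mutex; // mutable: const readers must lock too
};
```

The same shape repeats throughout the diff below.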
Changes from all commits
13 changes: 9 additions & 4 deletions lldb/include/lldb/Target/ThreadPlanStack.h
@@ -14,6 +14,8 @@
 #include <unordered_map>
 #include <vector>

+#include "llvm/Support/RWMutex.h"
+
 #include "lldb/Target/Target.h"
 #include "lldb/Target/Thread.h"
 #include "lldb/lldb-private-forward.h"
@@ -96,9 +98,12 @@ class ThreadPlanStack {
   void ClearThreadCache();

 private:
-  void PrintOneStack(Stream &s, llvm::StringRef stack_name,
-                     const PlanStack &stack, lldb::DescriptionLevel desc_level,
-                     bool include_internal) const;
+  lldb::ThreadPlanSP DiscardPlanNoLock();
+  lldb::ThreadPlanSP GetCurrentPlanNoLock() const;
+  void PrintOneStackNoLock(Stream &s, llvm::StringRef stack_name,
+                           const PlanStack &stack,
+                           lldb::DescriptionLevel desc_level,
+                           bool include_internal) const;

   PlanStack m_plans; ///< The stack of plans this thread is executing.
   PlanStack m_completed_plans; ///< Plans that have been completed by this
@@ -110,7 +115,7 @@ class ThreadPlanStack {
   size_t m_completed_plan_checkpoint = 0; // Monotonically increasing token for
                                           // completed plan checkpoints.
   std::unordered_map<size_t, PlanStack> m_completed_plan_store;
-  mutable std::recursive_mutex m_stack_mutex;
+  mutable llvm::sys::RWMutex m_stack_mutex;
 };

 class ThreadPlanStackMap {
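
The header change keeps the mutex `mutable` because the const read accessors still need to take the reader lock. For readers more at home in the standard library, a rough `std::shared_mutex` analogue of the same split (illustrative only; the PR itself uses `llvm::sys::RWMutex`, and this `Example` class is not LLDB code):

```cpp
#include <mutex>
#include <shared_mutex>

// Illustrative only -- std::shared_mutex expresses the same reader/writer
// split in standard C++.
class Example {
public:
  int Get() const {
    std::shared_lock<std::shared_mutex> guard(m_mutex); // like ScopedReader
    return m_value;
  }

  void Set(int v) {
    std::unique_lock<std::shared_mutex> guard(m_mutex); // like ScopedWriter
    m_value = v;
  }

private:
  int m_value = 0;
  mutable std::shared_mutex m_mutex; // was a recursive_mutex in the old code
};
```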
108 changes: 59 additions & 49 deletions lldb/source/Target/ThreadPlanStack.cpp
@@ -39,21 +39,21 @@ ThreadPlanStack::ThreadPlanStack(const Thread &thread, bool make_null) {
 void ThreadPlanStack::DumpThreadPlans(Stream &s,
                                       lldb::DescriptionLevel desc_level,
                                       bool include_internal) const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   s.IndentMore();
-  PrintOneStack(s, "Active plan stack", m_plans, desc_level, include_internal);
-  PrintOneStack(s, "Completed plan stack", m_completed_plans, desc_level,
-                include_internal);
-  PrintOneStack(s, "Discarded plan stack", m_discarded_plans, desc_level,
-                include_internal);
+  PrintOneStackNoLock(s, "Active plan stack", m_plans, desc_level,
+                      include_internal);
+  PrintOneStackNoLock(s, "Completed plan stack", m_completed_plans, desc_level,
+                      include_internal);
+  PrintOneStackNoLock(s, "Discarded plan stack", m_discarded_plans, desc_level,
+                      include_internal);
   s.IndentLess();
 }

-void ThreadPlanStack::PrintOneStack(Stream &s, llvm::StringRef stack_name,
-                                    const PlanStack &stack,
-                                    lldb::DescriptionLevel desc_level,
-                                    bool include_internal) const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+void ThreadPlanStack::PrintOneStackNoLock(Stream &s, llvm::StringRef stack_name,
+                                          const PlanStack &stack,
+                                          lldb::DescriptionLevel desc_level,
+                                          bool include_internal) const {
   // If the stack is empty, just exit:
   if (stack.empty())
     return;
@@ -82,15 +82,15 @@ void ThreadPlanStack::PrintOneStack(Stream &s, llvm::StringRef stack_name,
 }

 size_t ThreadPlanStack::CheckpointCompletedPlans() {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   m_completed_plan_checkpoint++;
   m_completed_plan_store.insert(
       std::make_pair(m_completed_plan_checkpoint, m_completed_plans));
   return m_completed_plan_checkpoint;
 }

 void ThreadPlanStack::RestoreCompletedPlanCheckpoint(size_t checkpoint) {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   auto result = m_completed_plan_store.find(checkpoint);
   assert(result != m_completed_plan_store.end() &&
          "Asked for a checkpoint that didn't exist");
@@ -99,13 +99,13 @@ void ThreadPlanStack::RestoreCompletedPlanCheckpoint(size_t checkpoint) {
 }

 void ThreadPlanStack::DiscardCompletedPlanCheckpoint(size_t checkpoint) {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   m_completed_plan_store.erase(checkpoint);
 }

 void ThreadPlanStack::ThreadDestroyed(Thread *thread) {
   // Tell the plan stacks that this thread is going away:
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   for (ThreadPlanSP plan : m_plans)
     plan->ThreadDestroyed();

@@ -134,20 +134,22 @@ void ThreadPlanStack::PushPlan(lldb::ThreadPlanSP new_plan_sp) {
   // If the thread plan doesn't already have a tracer, give it its parent's
   // tracer:
   // The first plan has to be a base plan:
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
-  assert((m_plans.size() > 0 || new_plan_sp->IsBasePlan()) &&
-         "Zeroth plan must be a base plan");
-
-  if (!new_plan_sp->GetThreadPlanTracer()) {
-    assert(!m_plans.empty());
-    new_plan_sp->SetThreadPlanTracer(m_plans.back()->GetThreadPlanTracer());
+  { // Scope for Lock - DidPush often adds plans to the stack:
+    llvm::sys::ScopedWriter guard(m_stack_mutex);
+    assert((m_plans.size() > 0 || new_plan_sp->IsBasePlan()) &&
+           "Zeroth plan must be a base plan");
+
+    if (!new_plan_sp->GetThreadPlanTracer()) {
+      assert(!m_plans.empty());
+      new_plan_sp->SetThreadPlanTracer(m_plans.back()->GetThreadPlanTracer());
+    }
+    m_plans.push_back(new_plan_sp);
   }
-  m_plans.push_back(new_plan_sp);
   new_plan_sp->DidPush();
 }

 lldb::ThreadPlanSP ThreadPlanStack::PopPlan() {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   assert(m_plans.size() > 1 && "Can't pop the base thread plan");

   // Note that moving the top element of the vector would leave it in an
@@ -161,7 +163,11 @@ lldb::ThreadPlanSP ThreadPlanStack::PopPlan() {
 }

 lldb::ThreadPlanSP ThreadPlanStack::DiscardPlan() {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
+  return DiscardPlanNoLock();
+}
+
+lldb::ThreadPlanSP ThreadPlanStack::DiscardPlanNoLock() {
   assert(m_plans.size() > 1 && "Can't discard the base thread plan");

   // Note that moving the top element of the vector would leave it in an
@@ -177,12 +183,12 @@ lldb::ThreadPlanSP ThreadPlanStack::DiscardPlan() {
 // If the input plan is nullptr, discard all plans. Otherwise make sure this
 // plan is in the stack, and if so discard up to and including it.
 void ThreadPlanStack::DiscardPlansUpToPlan(ThreadPlan *up_to_plan_ptr) {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   int stack_size = m_plans.size();

   if (up_to_plan_ptr == nullptr) {
     for (int i = stack_size - 1; i > 0; i--)
-      DiscardPlan();
+      DiscardPlanNoLock();
     return;
   }

@@ -197,23 +203,23 @@ void ThreadPlanStack::DiscardPlansUpToPlan(ThreadPlan *up_to_plan_ptr) {
   if (found_it) {
     bool last_one = false;
     for (int i = stack_size - 1; i > 0 && !last_one; i--) {
-      if (GetCurrentPlan().get() == up_to_plan_ptr)
+      if (GetCurrentPlanNoLock().get() == up_to_plan_ptr)
         last_one = true;
-      DiscardPlan();
+      DiscardPlanNoLock();
     }
   }
 }

 void ThreadPlanStack::DiscardAllPlans() {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   int stack_size = m_plans.size();
   for (int i = stack_size - 1; i > 0; i--) {
-    DiscardPlan();
+    DiscardPlanNoLock();
   }
 }

 void ThreadPlanStack::DiscardConsultingControllingPlans() {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   while (true) {
     int controlling_plan_idx;
     bool discard = true;
@@ -234,26 +240,30 @@ void ThreadPlanStack::DiscardConsultingControllingPlans() {

     // First pop all the dependent plans:
     for (int i = m_plans.size() - 1; i > controlling_plan_idx; i--) {
-      DiscardPlan();
+      DiscardPlanNoLock();
     }

     // Now discard the controlling plan itself.
     // The bottom-most plan never gets discarded. "OkayToDiscard" for it
     // means discard it's dependent plans, but not it...
     if (controlling_plan_idx > 0) {
-      DiscardPlan();
+      DiscardPlanNoLock();
     }
   }
 }

 lldb::ThreadPlanSP ThreadPlanStack::GetCurrentPlan() const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
+  return GetCurrentPlanNoLock();
+}
+
+lldb::ThreadPlanSP ThreadPlanStack::GetCurrentPlanNoLock() const {
   assert(m_plans.size() != 0 && "There will always be a base plan.");
   return m_plans.back();
 }

 lldb::ThreadPlanSP ThreadPlanStack::GetCompletedPlan(bool skip_private) const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   if (m_completed_plans.empty())
     return {};

@@ -271,7 +281,7 @@ lldb::ThreadPlanSP ThreadPlanStack::GetCompletedPlan(bool skip_private) const {

 lldb::ThreadPlanSP ThreadPlanStack::GetPlanByIndex(uint32_t plan_idx,
                                                    bool skip_private) const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   uint32_t idx = 0;

   for (lldb::ThreadPlanSP plan_sp : m_plans) {
@@ -285,7 +295,7 @@ lldb::ThreadPlanSP ThreadPlanStack::GetPlanByIndex(uint32_t plan_idx,
 }

 lldb::ValueObjectSP ThreadPlanStack::GetReturnValueObject() const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   if (m_completed_plans.empty())
     return {};

@@ -299,7 +309,7 @@ lldb::ValueObjectSP ThreadPlanStack::GetReturnValueObject() const {
 }

 lldb::ExpressionVariableSP ThreadPlanStack::GetExpressionVariable() const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   if (m_completed_plans.empty())
     return {};

@@ -312,23 +322,23 @@ lldb::ExpressionVariableSP ThreadPlanStack::GetExpressionVariable() const {
   return {};
 }
 bool ThreadPlanStack::AnyPlans() const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   // There is always a base plan...
   return m_plans.size() > 1;
 }

 bool ThreadPlanStack::AnyCompletedPlans() const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   return !m_completed_plans.empty();
 }

 bool ThreadPlanStack::AnyDiscardedPlans() const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   return !m_discarded_plans.empty();
 }

 bool ThreadPlanStack::IsPlanDone(ThreadPlan *in_plan) const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   for (auto plan : m_completed_plans) {
     if (plan.get() == in_plan)
       return true;
@@ -337,7 +347,7 @@ bool ThreadPlanStack::IsPlanDone(ThreadPlan *in_plan) const {
 }

 bool ThreadPlanStack::WasPlanDiscarded(ThreadPlan *in_plan) const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   for (auto plan : m_discarded_plans) {
     if (plan.get() == in_plan)
       return true;
@@ -346,7 +356,7 @@ bool ThreadPlanStack::WasPlanDiscarded(ThreadPlan *in_plan) const {
 }

 ThreadPlan *ThreadPlanStack::GetPreviousPlan(ThreadPlan *current_plan) const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   if (current_plan == nullptr)
     return nullptr;

@@ -361,7 +371,7 @@ ThreadPlan *ThreadPlanStack::GetPreviousPlan(ThreadPlan *current_plan) const {
   // If this is the first completed plan, the previous one is the
   // bottom of the regular plan stack.
   if (stack_size > 0 && m_completed_plans[0].get() == current_plan) {
-    return GetCurrentPlan().get();
+    return GetCurrentPlanNoLock().get();
   }

   // Otherwise look for it in the regular plans.
@@ -374,7 +384,7 @@ ThreadPlan *ThreadPlanStack::GetPreviousPlan(ThreadPlan *current_plan) const {
 }

 ThreadPlan *ThreadPlanStack::GetInnermostExpression() const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   int stack_size = m_plans.size();

   for (int i = stack_size - 1; i > 0; i--) {
@@ -385,13 +395,13 @@ ThreadPlan *ThreadPlanStack::GetInnermostExpression() const {
 }

 void ThreadPlanStack::ClearThreadCache() {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   for (lldb::ThreadPlanSP thread_plan_sp : m_plans)
     thread_plan_sp->ClearThreadCache();
 }

 void ThreadPlanStack::WillResume() {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   m_completed_plans.clear();
   m_discarded_plans.clear();
 }
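
One subtlety worth calling out in the implementation above: `PushPlan` now confines the writer lock to an inner scope and invokes `DidPush()` only after the guard is destroyed, because `DidPush` often pushes further plans, and re-entering a non-recursive `RWMutex` on the same thread would deadlock (the old `std::recursive_mutex` tolerated this). A generic sketch of that shape, with hypothetical `CallbackStack`/`did_push` names:

```cpp
#include <functional>
#include <vector>

#include "llvm/Support/RWMutex.h"

// Illustrative only -- CallbackStack and did_push are hypothetical names.
// The writer lock lives in an inner scope; the callback runs unlocked so it
// may safely call Push() again, mirroring PushPlan()/DidPush() above.
class CallbackStack {
public:
  void Push(int v, const std::function<void()> &did_push) {
    { // Scope for the lock: did_push may add more entries via Push().
      llvm::sys::ScopedWriter guard(m_mutex);
      m_items.push_back(v);
    } // guard destroyed; the mutex is free again
    did_push(); // re-entering Push() from here cannot deadlock
  }

private:
  std::vector<int> m_items;
  llvm::sys::RWMutex m_mutex;
};
```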