yuzu: debugger: Ignore HLE threads.

Repository: https://github.com/yuzu-emu/yuzu-android.git (mirror)
Commit 10738839ad, parent 3856564727
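Summary of the change: KThread now records the ThreadType it was created with (thread_type_for_debugging, set at the end of KThread::Initialize) and exposes it through GetThreadTypeForDebugging(). The Qt wait-tree debugger then skips any thread whose type is not ThreadType::User, so HLE (kernel-internal) threads no longer appear in the thread list or produce callstacks. Below is a minimal sketch of that filtering idiom; the CollectDebuggableThreads helper is hypothetical and only illustrates the pattern introduced by this commit, it is not part of the diff.

// Hypothetical helper (not part of this commit) showing how a debugger view can
// use the new accessor to keep only emulated user threads and drop HLE threads.
#include <memory>
#include <vector>

#include "core/hle/kernel/k_thread.h"

std::vector<Kernel::KThread*> CollectDebuggableThreads(
    const std::vector<std::shared_ptr<Kernel::KThread>>& threads) {
    std::vector<Kernel::KThread*> user_threads;
    for (const auto& thread : threads) {
        // Threads created by HLE kernel code report a type other than ThreadType::User.
        if (thread->GetThreadTypeForDebugging() == Kernel::ThreadType::User) {
            user_threads.push_back(thread.get());
        }
    }
    return user_threads;
}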
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -101,11 +101,12 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
         UNREACHABLE_MSG("KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type));
         break;
     }
+    thread_type_for_debugging = type;
 
     // Set the ideal core ID and affinity mask.
     virtual_ideal_core_id = virt_core;
     physical_ideal_core_id = phys_core;
-    virtual_affinity_mask = (static_cast<u64>(1) << virt_core);
+    virtual_affinity_mask = 1ULL << virt_core;
     physical_affinity_mask.SetAffinity(phys_core, true);
 
     // Set the thread state.
@@ -353,7 +354,7 @@ void KThread::Unpin() {
     // Enable core migration.
     ASSERT(num_core_migration_disables == 1);
     {
-        --num_core_migration_disables;
+        num_core_migration_disables--;
 
         // Restore our original state.
         const KAffinityMask old_mask = physical_affinity_mask;
@@ -494,8 +495,8 @@ ResultCode KThread::SetCoreMask(s32 core_id, u64 v_affinity_mask) {
 
     // Update the pinned waiter list.
     {
-        bool retry_update = false;
-        bool thread_is_pinned = false;
+        bool retry_update{};
+        bool thread_is_pinned{};
         do {
             // Lock the scheduler.
             KScopedSchedulerLock sl{kernel};
@@ -507,7 +508,7 @@ ResultCode KThread::SetCoreMask(s32 core_id, u64 v_affinity_mask) {
             retry_update = false;
 
             // Check if the thread is currently running.
-            bool thread_is_current = false;
+            bool thread_is_current{};
             s32 thread_core;
             for (thread_core = 0; thread_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES);
                  ++thread_core) {
@@ -683,8 +684,8 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
 
     // If the thread is now paused, update the pinned waiter list.
    if (activity == Svc::ThreadActivity::Paused) {
-        bool thread_is_pinned = false;
-        bool thread_is_current;
+        bool thread_is_pinned{};
+        bool thread_is_current{};
         do {
             // Lock the scheduler.
             KScopedSchedulerLock sl{kernel};
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -534,6 +534,10 @@ public:
         return wait_reason_for_debugging;
     }
 
+    [[nodiscard]] ThreadType GetThreadTypeForDebugging() const {
+        return thread_type_for_debugging;
+    }
+
     void SetWaitObjectsForDebugging(const std::span<KSynchronizationObject*>& objects) {
         wait_objects_for_debugging.clear();
         wait_objects_for_debugging.reserve(objects.size());
@@ -721,6 +725,7 @@ private:
     std::vector<KSynchronizationObject*> wait_objects_for_debugging;
     VAddr mutex_wait_address_for_debugging{};
     ThreadWaitReasonForDebugging wait_reason_for_debugging{};
+    ThreadType thread_type_for_debugging{};
     std::string name;
 
 public:
--- a/src/yuzu/debugger/wait_tree.cpp
+++ b/src/yuzu/debugger/wait_tree.cpp
@@ -93,8 +93,10 @@ std::vector<std::unique_ptr<WaitTreeThread>> WaitTreeItem::MakeThreadItemList()
     std::size_t row = 0;
     auto add_threads = [&](const std::vector<std::shared_ptr<Kernel::KThread>>& threads) {
         for (std::size_t i = 0; i < threads.size(); ++i) {
-            item_list.push_back(std::make_unique<WaitTreeThread>(*threads[i]));
-            item_list.back()->row = row;
+            if (threads[i]->GetThreadTypeForDebugging() == Kernel::ThreadType::User) {
+                item_list.push_back(std::make_unique<WaitTreeThread>(*threads[i]));
+                item_list.back()->row = row;
+            }
             ++row;
         }
     };
@@ -148,6 +150,10 @@ QString WaitTreeCallstack::GetText() const {
 std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeCallstack::GetChildren() const {
     std::vector<std::unique_ptr<WaitTreeItem>> list;
 
+    if (thread.GetThreadTypeForDebugging() != Kernel::ThreadType::User) {
+        return list;
+    }
+
     if (thread.GetOwnerProcess() == nullptr || !thread.GetOwnerProcess()->Is64BitProcess()) {
         return list;
     }