Merge pull request #436 from bunnei/multi-core
Initial support for multi-core
commit 1b5c02fc37
@@ -4,6 +4,8 @@ add_library(core STATIC
     arm/unicorn/arm_unicorn.h
     core.cpp
     core.h
+    core_cpu.cpp
+    core_cpu.h
     core_timing.cpp
     core_timing.h
     file_sys/directory.h
@@ -52,7 +52,7 @@ static void InterruptHook(uc_engine* uc, u32 intNo, void* user_data) {
 static bool UnmappedMemoryHook(uc_engine* uc, uc_mem_type type, u64 addr, int size, u64 value,
                                void* user_data) {
     ARM_Interface::ThreadContext ctx{};
-    Core::CPU().SaveContext(ctx);
+    Core::CurrentArmInterface().SaveContext(ctx);
     ASSERT_MSG(false, "Attempted to read from unmapped memory: 0x{:X}, pc=0x{:X}, lr=0x{:X}", addr,
                ctx.pc, ctx.cpu_registers[30]);
     return {};
@@ -5,10 +5,6 @@
 #include <memory>
 #include <utility>
 #include "common/logging/log.h"
-#ifdef ARCHITECTURE_x86_64
-#include "core/arm/dynarmic/arm_dynarmic.h"
-#endif
-#include "core/arm/unicorn/arm_unicorn.h"
 #include "core/core.h"
 #include "core/core_timing.h"
 #include "core/gdbstub/gdbstub.h"
@@ -31,11 +27,31 @@ namespace Core {
 
 System::~System() = default;
 
+/// Runs a CPU core while the system is powered on
+static void RunCpuCore(std::shared_ptr<Cpu> cpu_state) {
+    while (Core::System().GetInstance().IsPoweredOn()) {
+        cpu_state->RunLoop(true);
+    }
+}
+
+Cpu& System::CurrentCpuCore() {
+    // If multicore is enabled, use host thread to figure out the current CPU core
+    if (Settings::values.use_multi_core) {
+        const auto& search = thread_to_cpu.find(std::this_thread::get_id());
+        ASSERT(search != thread_to_cpu.end());
+        ASSERT(search->second);
+        return *search->second;
+    }
+
+    // Otherwise, use single-threaded mode active_core variable
+    return *cpu_cores[active_core];
+}
+
 System::ResultStatus System::RunLoop(bool tight_loop) {
     status = ResultStatus::Success;
-    if (!cpu_core) {
-        return ResultStatus::ErrorNotInitialized;
-    }
+
+    // Update thread_to_cpu in case Core 0 is run from a different host thread
+    thread_to_cpu[std::this_thread::get_id()] = cpu_cores[0];
 
     if (GDBStub::IsServerEnabled()) {
         GDBStub::HandlePacket();
@@ -52,25 +68,14 @@ System::ResultStatus System::RunLoop(bool tight_loop) {
         }
     }
 
-    // If we don't have a currently active thread then don't execute instructions,
-    // instead advance to the next event and try to yield to the next thread
-    if (Kernel::GetCurrentThread() == nullptr) {
-        NGLOG_TRACE(Core_ARM, "Idling");
-        CoreTiming::Idle();
-        CoreTiming::Advance();
-        PrepareReschedule();
-    } else {
-        CoreTiming::Advance();
-        if (tight_loop) {
-            cpu_core->Run();
-        } else {
-            cpu_core->Step();
+    for (active_core = 0; active_core < NUM_CPU_CORES; ++active_core) {
+        cpu_cores[active_core]->RunLoop(tight_loop);
+        if (Settings::values.use_multi_core) {
+            // Cores 1-3 are run on other threads in this mode
+            break;
         }
     }
 
-    HW::Update();
-    Reschedule();
-
     return status;
 }
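Note: after this change System::RunLoop drives execution in two ways. In single-core mode one host thread steps each of the four Cpu instances in turn (active_core records which guest core the current slice belongs to); in multi-core mode the loop only ever runs core 0, because cores 1-3 each have a dedicated host thread spinning in RunCpuCore. The standalone sketch below illustrates that dispatch pattern only; the FakeCore type and function names are simplified stand-ins, not the yuzu classes.

#include <array>
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

// Simplified stand-in for Core::Cpu: one "guest core" that runs one time slice.
struct FakeCore {
    int index;
    void RunSlice() { std::printf("core %d ran a slice\n", index); }
};

constexpr int kNumCores = 4;

// Single-core mode: one host thread multiplexes every guest core, like the
// for (active_core = 0; ...) loop added to System::RunLoop.
void RunSingleThreaded(std::array<FakeCore, kNumCores>& cores, int slices) {
    for (int s = 0; s < slices; ++s)
        for (auto& core : cores)
            core.RunSlice();
}

// Multi-core mode: cores 1-3 get their own host threads (like RunCpuCore),
// while the caller keeps driving core 0.
void RunMultiCore(std::array<FakeCore, kNumCores>& cores, int slices) {
    std::atomic<bool> running{true};
    std::vector<std::thread> workers;
    for (int i = 1; i < kNumCores; ++i)
        workers.emplace_back([&, i] {
            while (running.load())
                cores[i].RunSlice();
        });
    for (int s = 0; s < slices; ++s)
        cores[0].RunSlice(); // the main thread only ever runs core 0
    running.store(false);
    for (auto& w : workers)
        w.join();
}

int main() {
    std::array<FakeCore, kNumCores> cores{{{0}, {1}, {2}, {3}}};
    RunSingleThreaded(cores, 2);
    RunMultiCore(cores, 2);
}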
@@ -133,21 +138,26 @@ System::ResultStatus System::Load(EmuWindow* emu_window, const std::string& file
 }
 
 void System::PrepareReschedule() {
-    cpu_core->PrepareReschedule();
-    reschedule_pending = true;
+    CurrentCpuCore().PrepareReschedule();
 }
 
 PerfStats::Results System::GetAndResetPerfStats() {
     return perf_stats.GetAndResetStats(CoreTiming::GetGlobalTimeUs());
 }
 
-void System::Reschedule() {
-    if (!reschedule_pending) {
-        return;
-    }
+const std::shared_ptr<Kernel::Scheduler>& System::Scheduler(size_t core_index) {
+    ASSERT(core_index < NUM_CPU_CORES);
+    return cpu_cores[core_index]->Scheduler();
+}
 
-    reschedule_pending = false;
-    Core::System::GetInstance().Scheduler().Reschedule();
+ARM_Interface& System::ArmInterface(size_t core_index) {
+    ASSERT(core_index < NUM_CPU_CORES);
+    return cpu_cores[core_index]->ArmInterface();
+}
+
+Cpu& System::CpuCore(size_t core_index) {
+    ASSERT(core_index < NUM_CPU_CORES);
+    return *cpu_cores[core_index];
 }
 
 System::ResultStatus System::Init(EmuWindow* emu_window, u32 system_mode) {
@@ -157,26 +167,17 @@ System::ResultStatus System::Init(EmuWindow* emu_window, u32 system_mode) {
 
     current_process = Kernel::Process::Create("main");
 
-    if (Settings::values.use_cpu_jit) {
-#ifdef ARCHITECTURE_x86_64
-        cpu_core = std::make_shared<ARM_Dynarmic>();
-#else
-        cpu_core = std::make_shared<ARM_Unicorn>();
-        NGLOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available");
-#endif
-    } else {
-        cpu_core = std::make_shared<ARM_Unicorn>();
+    cpu_barrier = std::make_shared<CpuBarrier>();
+    for (size_t index = 0; index < cpu_cores.size(); ++index) {
+        cpu_cores[index] = std::make_shared<Cpu>(cpu_barrier, index);
     }
 
     gpu_core = std::make_unique<Tegra::GPU>();
 
     telemetry_session = std::make_unique<Core::TelemetrySession>();
 
     service_manager = std::make_shared<Service::SM::ServiceManager>();
 
     HW::Init();
     Kernel::Init(system_mode);
-    scheduler = std::make_unique<Kernel::Scheduler>(cpu_core.get());
     Service::Init(service_manager);
     GDBStub::Init();
@@ -184,6 +185,17 @@ System::ResultStatus System::Init(EmuWindow* emu_window, u32 system_mode) {
         return ResultStatus::ErrorVideoCore;
     }
 
+    // Create threads for CPU cores 1-3, and build thread_to_cpu map
+    // CPU core 0 is run on the main thread
+    thread_to_cpu[std::this_thread::get_id()] = cpu_cores[0];
+    if (Settings::values.use_multi_core) {
+        for (size_t index = 0; index < cpu_core_threads.size(); ++index) {
+            cpu_core_threads[index] =
+                std::make_unique<std::thread>(RunCpuCore, cpu_cores[index + 1]);
+            thread_to_cpu[cpu_core_threads[index]->get_id()] = cpu_cores[index + 1];
+        }
+    }
+
     NGLOG_DEBUG(Core, "Initialized OK");
 
     // Reset counters and set time origin to current frame
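Note: the thread_to_cpu map built here is what lets CurrentCpuCore() answer the question "which guest core does the calling host thread drive?": each worker thread's std::thread::id is recorded against its Cpu instance, and core 0 is keyed by whichever host thread last entered RunLoop. A minimal illustration of that lookup idiom, with plain ints in place of the Cpu objects (names here are not from the codebase):

#include <cassert>
#include <map>
#include <thread>

// Maps a host thread id to the index of the guest core it drives.
std::map<std::thread::id, int> thread_to_core;

int CurrentCore() {
    const auto it = thread_to_core.find(std::this_thread::get_id());
    assert(it != thread_to_core.end()); // every executing thread must be registered
    return it->second;
}

int main() {
    thread_to_core[std::this_thread::get_id()] = 0; // main thread drives core 0
    std::thread worker([] {
        // In the real code the creating thread registers the worker's id;
        // here the worker registers itself just to keep the example short.
        thread_to_core[std::this_thread::get_id()] = 1;
        assert(CurrentCore() == 1);
    });
    worker.join();
    assert(CurrentCore() == 0);
}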
@@ -207,15 +219,30 @@ void System::Shutdown() {
     VideoCore::Shutdown();
     GDBStub::Shutdown();
     Service::Shutdown();
-    scheduler.reset();
     Kernel::Shutdown();
     HW::Shutdown();
     service_manager.reset();
     telemetry_session.reset();
     gpu_core.reset();
-    cpu_core.reset();
+
+    // Close all CPU/threading state
+    cpu_barrier->NotifyEnd();
+    if (Settings::values.use_multi_core) {
+        for (auto& thread : cpu_core_threads) {
+            thread->join();
+            thread.reset();
+        }
+    }
+    thread_to_cpu.clear();
+    for (auto& cpu_core : cpu_cores) {
+        cpu_core.reset();
+    }
+    cpu_barrier.reset();
+
+    // Close core timing
     CoreTiming::Shutdown();
+
+    // Close app loader
     app_loader.reset();
 
     NGLOG_DEBUG(Core, "Shutdown OK");
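Note: the teardown order matters here: cpu_barrier->NotifyEnd() is issued before joining the worker threads so that any core blocked inside CpuBarrier::Rendezvous() is released (Rendezvous() then returns false and RunCpuCore exits its loop); only afterwards are the Cpu objects and the barrier destroyed. A generic notify-then-join sketch of that shutdown pattern, using plain standard-library types rather than the yuzu classes:

#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

std::mutex m;
std::condition_variable cv;
bool stop = false;

void Worker() {
    std::unique_lock<std::mutex> lock(m);
    cv.wait(lock, [] { return stop; }); // parked like a core waiting in Rendezvous()
}

int main() {
    std::vector<std::thread> workers;
    for (int i = 0; i < 3; ++i)
        workers.emplace_back(Worker);

    {   // "NotifyEnd": wake everyone up first...
        std::lock_guard<std::mutex> lock(m);
        stop = true;
    }
    cv.notify_all();

    for (auto& w : workers) // ...then it is safe to join.
        w.join();
}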
@@ -4,9 +4,12 @@
 
 #pragma once
 
+#include <array>
 #include <memory>
 #include <string>
+#include <thread>
 #include "common/common_types.h"
+#include "core/core_cpu.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/scheduler.h"
 #include "core/loader/loader.h"
@@ -89,7 +92,7 @@ public:
      * @returns True if the emulated system is powered on, otherwise false.
      */
     bool IsPoweredOn() const {
-        return cpu_core != nullptr;
+        return cpu_barrier && cpu_barrier->IsAlive();
     }
 
     /**
@@ -103,24 +106,34 @@ public:
     /// Prepare the core emulation for a reschedule
     void PrepareReschedule();
 
+    /// Gets and resets core performance statistics
     PerfStats::Results GetAndResetPerfStats();
 
-    /**
-     * Gets a reference to the emulated CPU.
-     * @returns A reference to the emulated CPU.
-     */
-    ARM_Interface& CPU() {
-        return *cpu_core;
+    /// Gets an ARM interface to the CPU core that is currently running
+    ARM_Interface& CurrentArmInterface() {
+        return CurrentCpuCore().ArmInterface();
     }
 
+    /// Gets an ARM interface to the CPU core with the specified index
+    ARM_Interface& ArmInterface(size_t core_index);
+
+    /// Gets a CPU interface to the CPU core with the specified index
+    Cpu& CpuCore(size_t core_index);
+
+    /// Gets the GPU interface
     Tegra::GPU& GPU() {
         return *gpu_core;
     }
 
-    Kernel::Scheduler& Scheduler() {
-        return *scheduler;
+    /// Gets the scheduler for the CPU core that is currently running
+    Kernel::Scheduler& CurrentScheduler() {
+        return *CurrentCpuCore().Scheduler();
     }
 
+    /// Gets the scheduler for the CPU core with the specified index
+    const std::shared_ptr<Kernel::Scheduler>& Scheduler(size_t core_index);
+
+    /// Gets the current process
     Kernel::SharedPtr<Kernel::Process>& CurrentProcess() {
         return current_process;
     }
@@ -155,6 +168,9 @@ public:
     }
 
 private:
+    /// Returns the currently running CPU core
+    Cpu& CurrentCpuCore();
+
     /**
      * Initialize the emulated system.
      * @param emu_window Pointer to the host-system window used for video output and keyboard input.
@@ -163,22 +179,15 @@ private:
      */
     ResultStatus Init(EmuWindow* emu_window, u32 system_mode);
 
-    /// Reschedule the core emulation
-    void Reschedule();
-
     /// AppLoader used to load the current executing application
     std::unique_ptr<Loader::AppLoader> app_loader;
 
-    std::shared_ptr<ARM_Interface> cpu_core;
-    std::unique_ptr<Kernel::Scheduler> scheduler;
     std::unique_ptr<Tegra::GPU> gpu_core;
 
     std::shared_ptr<Tegra::DebugContext> debug_context;
 
     Kernel::SharedPtr<Kernel::Process> current_process;
+    std::shared_ptr<CpuBarrier> cpu_barrier;
+    std::array<std::shared_ptr<Cpu>, NUM_CPU_CORES> cpu_cores;
+    std::array<std::unique_ptr<std::thread>, NUM_CPU_CORES - 1> cpu_core_threads;
+    size_t active_core{}; ///< Active core, only used in single thread mode
 
-    /// When true, signals that a reschedule should happen
-    bool reschedule_pending{};
-
     /// Service manager
     std::shared_ptr<Service::SM::ServiceManager> service_manager;
@@ -190,10 +199,13 @@ private:
 
     ResultStatus status = ResultStatus::Success;
     std::string status_details = "";
 
+    /// Map of guest threads to CPU cores
+    std::map<std::thread::id, std::shared_ptr<Cpu>> thread_to_cpu;
+
 };
 
-inline ARM_Interface& CPU() {
-    return System::GetInstance().CPU();
+inline ARM_Interface& CurrentArmInterface() {
+    return System::GetInstance().CurrentArmInterface();
 }
 
 inline TelemetrySession& Telemetry() {
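Note: for callers, the practical effect of this header change is a rename plus per-core variants: Core::CPU() becomes Core::CurrentArmInterface(), System::Scheduler() becomes CurrentScheduler(), and the indexed accessors ArmInterface(core), CpuCore(core) and Scheduler(core) are added for code that must touch a specific core. A before/after sketch of call sites follows; the surrounding function is hypothetical and only meant to compile inside the yuzu tree, it is not part of the commit.

#include "core/core.h"

// Hypothetical helper, shown only to contrast the old and new accessors.
void DumpProgramCounters() {
    // Before this commit:
    //   u64 pc = Core::CPU().GetPC();
    //   auto& sched = Core::System::GetInstance().Scheduler();

    // After this commit: "current" means the core driven by this host thread.
    u64 pc = Core::CurrentArmInterface().GetPC();
    auto& sched = Core::System::GetInstance().CurrentScheduler();

    // Per-core access when a specific core is meant:
    for (size_t core = 0; core < Core::NUM_CPU_CORES; ++core) {
        u64 core_pc = Core::System::GetInstance().ArmInterface(core).GetPC();
        (void)core_pc;
    }
    (void)pc;
    (void)sched;
}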
@@ -0,0 +1,119 @@
+// Copyright 2018 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <condition_variable>
+#include <mutex>
+
+#include "common/logging/log.h"
+#ifdef ARCHITECTURE_x86_64
+#include "core/arm/dynarmic/arm_dynarmic.h"
+#endif
+#include "core/arm/unicorn/arm_unicorn.h"
+#include "core/core_cpu.h"
+#include "core/core_timing.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/scheduler.h"
+#include "core/hle/kernel/thread.h"
+#include "core/settings.h"
+
+namespace Core {
+
+void CpuBarrier::NotifyEnd() {
+    std::unique_lock<std::mutex> lock(mutex);
+    end = true;
+    condition.notify_all();
+}
+
+bool CpuBarrier::Rendezvous() {
+    if (!Settings::values.use_multi_core) {
+        // Meaningless when running in single-core mode
+        return true;
+    }
+
+    if (!end) {
+        std::unique_lock<std::mutex> lock(mutex);
+
+        --cores_waiting;
+        if (!cores_waiting) {
+            cores_waiting = NUM_CPU_CORES;
+            condition.notify_all();
+            return true;
+        }
+
+        condition.wait(lock);
+        return true;
+    }
+
+    return false;
+}
+
+Cpu::Cpu(std::shared_ptr<CpuBarrier> cpu_barrier, size_t core_index)
+    : cpu_barrier{std::move(cpu_barrier)}, core_index{core_index} {
+
+    if (Settings::values.use_cpu_jit) {
+#ifdef ARCHITECTURE_x86_64
+        arm_interface = std::make_shared<ARM_Dynarmic>();
+#else
+        cpu_core = std::make_shared<ARM_Unicorn>();
+        NGLOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available");
+#endif
+    } else {
+        arm_interface = std::make_shared<ARM_Unicorn>();
+    }
+
+    scheduler = std::make_shared<Kernel::Scheduler>(arm_interface.get());
+}
+
+void Cpu::RunLoop(bool tight_loop) {
+    // Wait for all other CPU cores to complete the previous slice, such that they run in lock-step
+    if (!cpu_barrier->Rendezvous()) {
+        // If rendezvous failed, session has been killed
+        return;
+    }
+
+    // If we don't have a currently active thread then don't execute instructions,
+    // instead advance to the next event and try to yield to the next thread
+    if (Kernel::GetCurrentThread() == nullptr) {
+        NGLOG_TRACE(Core, "Core-{} idling", core_index);
+
+        if (IsMainCore()) {
+            CoreTiming::Idle();
+            CoreTiming::Advance();
+        }
+
+        PrepareReschedule();
+    } else {
+        if (IsMainCore()) {
+            CoreTiming::Advance();
+        }
+
+        if (tight_loop) {
+            arm_interface->Run();
+        } else {
+            arm_interface->Step();
+        }
+    }
+
+    Reschedule();
+}
+
+void Cpu::SingleStep() {
+    return RunLoop(false);
+}
+
+void Cpu::PrepareReschedule() {
+    arm_interface->PrepareReschedule();
+    reschedule_pending = true;
+}
+
+void Cpu::Reschedule() {
+    if (!reschedule_pending) {
+        return;
+    }
+
+    reschedule_pending = false;
+    scheduler->Reschedule();
+}
+
+} // namespace Core
@@ -0,0 +1,78 @@
+// Copyright 2018 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <atomic>
+#include <condition_variable>
+#include <memory>
+#include <mutex>
+#include <string>
+#include "common/common_types.h"
+
+class ARM_Interface;
+
+namespace Kernel {
+class Scheduler;
+}
+
+namespace Core {
+
+constexpr unsigned NUM_CPU_CORES{4};
+
+class CpuBarrier {
+public:
+    bool IsAlive() const {
+        return !end;
+    }
+
+    void NotifyEnd();
+
+    bool Rendezvous();
+
+private:
+    unsigned cores_waiting{NUM_CPU_CORES};
+    std::mutex mutex;
+    std::condition_variable condition;
+    std::atomic<bool> end{};
+};
+
+class Cpu {
+public:
+    Cpu(std::shared_ptr<CpuBarrier> cpu_barrier, size_t core_index);
+
+    void RunLoop(bool tight_loop = true);
+
+    void SingleStep();
+
+    void PrepareReschedule();
+
+    ARM_Interface& ArmInterface() {
+        return *arm_interface;
+    }
+
+    const ARM_Interface& ArmInterface() const {
+        return *arm_interface;
+    }
+
+    const std::shared_ptr<Kernel::Scheduler>& Scheduler() const {
+        return scheduler;
+    }
+
+    bool IsMainCore() const {
+        return core_index == 0;
+    }
+
+private:
+    void Reschedule();
+
+    std::shared_ptr<ARM_Interface> arm_interface;
+    std::shared_ptr<CpuBarrier> cpu_barrier;
+    std::shared_ptr<Kernel::Scheduler> scheduler;
+
+    bool reschedule_pending{};
+    size_t core_index;
+};
+
+} // namespace Core
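Note: CpuBarrier is a reusable 4-way barrier: each core calls Rendezvous() once per slice, the last arrival resets the counter and wakes the rest, and NotifyEnd() aborts the wait at shutdown. The standalone sketch below shows the same rendezvous idea with std::mutex and std::condition_variable for N worker threads; it mirrors the structure (with an added generation counter) rather than reproducing the exact yuzu class.

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

class SimpleBarrier {
public:
    explicit SimpleBarrier(unsigned count) : waiting_(count), total_(count) {}

    // Blocks until all `total_` threads have arrived, then releases them together.
    void Rendezvous() {
        std::unique_lock<std::mutex> lock(mutex_);
        if (--waiting_ == 0) {
            waiting_ = total_;       // reset for the next slice
            ++generation_;
            cv_.notify_all();        // last arrival wakes everyone
            return;
        }
        const unsigned gen = generation_;
        cv_.wait(lock, [&] { return gen != generation_; });
    }

private:
    std::mutex mutex_;
    std::condition_variable cv_;
    unsigned waiting_;
    const unsigned total_;
    unsigned generation_ = 0;
};

int main() {
    constexpr unsigned kCores = 4;
    SimpleBarrier barrier(kCores);
    std::vector<std::thread> cores;
    for (unsigned i = 0; i < kCores; ++i)
        cores.emplace_back([&, i] {
            for (int slice = 0; slice < 3; ++slice) {
                barrier.Rendezvous(); // all cores start each slice together
                std::printf("core %u ran slice %d\n", i, slice);
            }
        });
    for (auto& t : cores)
        t.join();
}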
@@ -598,11 +598,11 @@ static void ReadRegister() {
     }
 
     if (id <= SP_REGISTER) {
-        LongToGdbHex(reply, Core::CPU().GetReg(static_cast<int>(id)));
+        LongToGdbHex(reply, Core::CurrentArmInterface().GetReg(static_cast<int>(id)));
     } else if (id == PC_REGISTER) {
-        LongToGdbHex(reply, Core::CPU().GetPC());
+        LongToGdbHex(reply, Core::CurrentArmInterface().GetPC());
     } else if (id == CPSR_REGISTER) {
-        IntToGdbHex(reply, Core::CPU().GetCPSR());
+        IntToGdbHex(reply, Core::CurrentArmInterface().GetCPSR());
     } else {
         return SendReply("E01");
     }
@@ -618,16 +618,16 @@ static void ReadRegisters() {
     u8* bufptr = buffer;
 
     for (int reg = 0; reg <= SP_REGISTER; reg++) {
-        LongToGdbHex(bufptr + reg * 16, Core::CPU().GetReg(reg));
+        LongToGdbHex(bufptr + reg * 16, Core::CurrentArmInterface().GetReg(reg));
     }
 
     bufptr += (32 * 16);
 
-    LongToGdbHex(bufptr, Core::CPU().GetPC());
+    LongToGdbHex(bufptr, Core::CurrentArmInterface().GetPC());
 
     bufptr += 16;
 
-    IntToGdbHex(bufptr, Core::CPU().GetCPSR());
+    IntToGdbHex(bufptr, Core::CurrentArmInterface().GetCPSR());
 
     bufptr += 8;
 
@@ -646,11 +646,11 @@ static void WriteRegister() {
     }
 
     if (id <= SP_REGISTER) {
-        Core::CPU().SetReg(id, GdbHexToLong(buffer_ptr));
+        Core::CurrentArmInterface().SetReg(id, GdbHexToLong(buffer_ptr));
     } else if (id == PC_REGISTER) {
-        Core::CPU().SetPC(GdbHexToLong(buffer_ptr));
+        Core::CurrentArmInterface().SetPC(GdbHexToLong(buffer_ptr));
     } else if (id == CPSR_REGISTER) {
-        Core::CPU().SetCPSR(GdbHexToInt(buffer_ptr));
+        Core::CurrentArmInterface().SetCPSR(GdbHexToInt(buffer_ptr));
     } else {
         return SendReply("E01");
     }
@@ -667,11 +667,11 @@ static void WriteRegisters() {
 
     for (int i = 0, reg = 0; reg <= CPSR_REGISTER; i++, reg++) {
         if (reg <= SP_REGISTER) {
-            Core::CPU().SetReg(reg, GdbHexToLong(buffer_ptr + i * 16));
+            Core::CurrentArmInterface().SetReg(reg, GdbHexToLong(buffer_ptr + i * 16));
         } else if (reg == PC_REGISTER) {
-            Core::CPU().SetPC(GdbHexToLong(buffer_ptr + i * 16));
+            Core::CurrentArmInterface().SetPC(GdbHexToLong(buffer_ptr + i * 16));
        } else if (reg == CPSR_REGISTER) {
-            Core::CPU().SetCPSR(GdbHexToInt(buffer_ptr + i * 16));
+            Core::CurrentArmInterface().SetCPSR(GdbHexToInt(buffer_ptr + i * 16));
         } else {
             UNIMPLEMENTED();
         }
@@ -9,6 +9,8 @@
 
 namespace Kernel {
 
+std::mutex Scheduler::scheduler_mutex;
+
 Scheduler::Scheduler(ARM_Interface* cpu_core) : cpu_core(cpu_core) {}
 
 Scheduler::~Scheduler() {
@@ -18,6 +20,7 @@ Scheduler::~Scheduler() {
 }
 
 bool Scheduler::HaveReadyThreads() {
+    std::lock_guard<std::mutex> lock(scheduler_mutex);
     return ready_queue.get_first() != nullptr;
 }
 
@@ -90,6 +93,8 @@ void Scheduler::SwitchContext(Thread* new_thread) {
 }
 
 void Scheduler::Reschedule() {
+    std::lock_guard<std::mutex> lock(scheduler_mutex);
+
     Thread* cur = GetCurrentThread();
     Thread* next = PopNextReadyThread();
 
@@ -105,26 +110,36 @@ void Scheduler::Reschedule() {
 }
 
 void Scheduler::AddThread(SharedPtr<Thread> thread, u32 priority) {
+    std::lock_guard<std::mutex> lock(scheduler_mutex);
+
     thread_list.push_back(thread);
     ready_queue.prepare(priority);
 }
 
 void Scheduler::RemoveThread(Thread* thread) {
+    std::lock_guard<std::mutex> lock(scheduler_mutex);
+
     thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
                       thread_list.end());
 }
 
 void Scheduler::ScheduleThread(Thread* thread, u32 priority) {
+    std::lock_guard<std::mutex> lock(scheduler_mutex);
+
     ASSERT(thread->status == THREADSTATUS_READY);
     ready_queue.push_back(priority, thread);
 }
 
 void Scheduler::UnscheduleThread(Thread* thread, u32 priority) {
+    std::lock_guard<std::mutex> lock(scheduler_mutex);
+
     ASSERT(thread->status == THREADSTATUS_READY);
     ready_queue.remove(priority, thread);
 }
 
 void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
+    std::lock_guard<std::mutex> lock(scheduler_mutex);
+
     // If thread was ready, adjust queues
     if (thread->status == THREADSTATUS_READY)
         ready_queue.move(thread, thread->current_priority, priority);
 
@@ -4,6 +4,7 @@
 
 #pragma once
 
+#include <mutex>
 #include <vector>
 #include "common/common_types.h"
 #include "common/thread_queue_list.h"
@@ -68,6 +69,8 @@ private:
     SharedPtr<Thread> current_thread = nullptr;
 
     ARM_Interface* cpu_core;
+
+    static std::mutex scheduler_mutex;
 };
 
 } // namespace Kernel
@@ -401,8 +401,8 @@ static ResultCode SetThreadPriority(Handle handle, u32 priority) {
 
 /// Get which CPU core is executing the current thread
 static u32 GetCurrentProcessorNumber() {
-    NGLOG_WARNING(Kernel_SVC, "(STUBBED) called, defaulting to processor 0");
-    return 0;
+    NGLOG_TRACE(Kernel_SVC, "called");
+    return GetCurrentThread()->processor_id;
 }
 
 static ResultCode MapSharedMemory(Handle shared_memory_handle, VAddr addr, u64 size,
@@ -485,22 +485,28 @@ static void ExitProcess() {
 
     Core::CurrentProcess()->status = ProcessStatus::Exited;
 
-    // Stop all the process threads that are currently waiting for objects.
-    auto& thread_list = Core::System::GetInstance().Scheduler().GetThreadList();
-    for (auto& thread : thread_list) {
-        if (thread->owner_process != Core::CurrentProcess())
-            continue;
+    auto stop_threads = [](const std::vector<SharedPtr<Thread>>& thread_list) {
+        for (auto& thread : thread_list) {
+            if (thread->owner_process != Core::CurrentProcess())
+                continue;
 
             if (thread == GetCurrentThread())
                 continue;
 
             // TODO(Subv): When are the other running/ready threads terminated?
             ASSERT_MSG(thread->status == THREADSTATUS_WAIT_SYNCH_ANY ||
                            thread->status == THREADSTATUS_WAIT_SYNCH_ALL,
                        "Exiting processes with non-waiting threads is currently unimplemented");
 
             thread->Stop();
         }
+    };
+
+    auto& system = Core::System::GetInstance();
+    stop_threads(system.Scheduler(0)->GetThreadList());
+    stop_threads(system.Scheduler(1)->GetThreadList());
+    stop_threads(system.Scheduler(2)->GetThreadList());
+    stop_threads(system.Scheduler(3)->GetThreadList());
 
     // Kill the current thread
     GetCurrentThread()->Stop();
@@ -530,14 +536,9 @@ static ResultCode CreateThread(Handle* out_handle, VAddr entry_point, u64 arg, V
 
     switch (processor_id) {
     case THREADPROCESSORID_0:
-        break;
     case THREADPROCESSORID_1:
     case THREADPROCESSORID_2:
     case THREADPROCESSORID_3:
-        // TODO(bunnei): Implement support for other processor IDs
-        NGLOG_ERROR(Kernel_SVC,
-                    "Newly created thread must run in another thread ({}), unimplemented.",
-                    processor_id);
         break;
     default:
         ASSERT_MSG(false, "Unsupported thread processor ID: {}", processor_id);
@@ -576,7 +577,7 @@ static ResultCode StartThread(Handle thread_handle) {
 
 /// Called when a thread exits
 static void ExitThread() {
-    NGLOG_TRACE(Kernel_SVC, "called, pc=0x{:08X}", Core::CPU().GetPC());
+    NGLOG_TRACE(Kernel_SVC, "called, pc=0x{:08X}", Core::CurrentArmInterface().GetPC());
 
     ExitCurrentThread();
     Core::System::GetInstance().PrepareReschedule();
@@ -588,7 +589,7 @@ static void SleepThread(s64 nanoseconds) {
 
     // Don't attempt to yield execution if there are no available threads to run,
     // this way we avoid a useless reschedule to the idle thread.
-    if (nanoseconds == 0 && !Core::System::GetInstance().Scheduler().HaveReadyThreads())
+    if (nanoseconds == 0 && !Core::System::GetInstance().CurrentScheduler().HaveReadyThreads())
         return;
 
     // Sleep current thread and check for next thread to schedule
@@ -624,7 +625,7 @@ static ResultCode WaitProcessWideKeyAtomic(VAddr mutex_addr, VAddr condition_var
 
     // Note: Deliberately don't attempt to inherit the lock owner's priority.
 
-    Core::System::GetInstance().PrepareReschedule();
+    Core::System::GetInstance().CpuCore(current_thread->processor_id).PrepareReschedule();
     return RESULT_SUCCESS;
 }
 
@@ -634,53 +635,60 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
                 condition_variable_addr, target);
 
     u32 processed = 0;
-    auto& thread_list = Core::System::GetInstance().Scheduler().GetThreadList();
 
-    for (auto& thread : thread_list) {
-        if (thread->condvar_wait_address != condition_variable_addr)
-            continue;
+    auto signal_process_wide_key = [&](size_t core_index) {
+        const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
+        for (auto& thread : scheduler->GetThreadList()) {
+            if (thread->condvar_wait_address != condition_variable_addr)
+                continue;
 
             // Only process up to 'target' threads, unless 'target' is -1, in which case process
             // them all.
             if (target != -1 && processed >= target)
                 break;
 
             // If the mutex is not yet acquired, acquire it.
             u32 mutex_val = Memory::Read32(thread->mutex_wait_address);
 
             if (mutex_val == 0) {
                 // We were able to acquire the mutex, resume this thread.
                 Memory::Write32(thread->mutex_wait_address, thread->wait_handle);
                 ASSERT(thread->status == THREADSTATUS_WAIT_MUTEX);
                 thread->ResumeFromWait();
 
                 auto lock_owner = thread->lock_owner;
                 if (lock_owner)
                     lock_owner->RemoveMutexWaiter(thread);
 
                 thread->lock_owner = nullptr;
                 thread->mutex_wait_address = 0;
                 thread->condvar_wait_address = 0;
                 thread->wait_handle = 0;
             } else {
                 // Couldn't acquire the mutex, block the thread.
                 Handle owner_handle = static_cast<Handle>(mutex_val & Mutex::MutexOwnerMask);
                 auto owner = g_handle_table.Get<Thread>(owner_handle);
                 ASSERT(owner);
                 ASSERT(thread->status != THREADSTATUS_RUNNING);
                 thread->status = THREADSTATUS_WAIT_MUTEX;
                 thread->wakeup_callback = nullptr;
 
                 // Signal that the mutex now has a waiting thread.
                 Memory::Write32(thread->mutex_wait_address, mutex_val | Mutex::MutexHasWaitersFlag);
 
                 owner->AddMutexWaiter(thread);
 
-                Core::System::GetInstance().PrepareReschedule();
+                Core::System::GetInstance().CpuCore(thread->processor_id).PrepareReschedule();
+            }
+
+            ++processed;
         }
+    };
 
-        ++processed;
-    }
+    signal_process_wide_key(0);
+    signal_process_wide_key(1);
+    signal_process_wide_key(2);
+    signal_process_wide_key(3);
 
     return RESULT_SUCCESS;
 }
@@ -718,16 +726,31 @@ static ResultCode CreateTransferMemory(Handle* handle, VAddr addr, u64 size, u32
     return RESULT_SUCCESS;
 }
 
-static ResultCode GetThreadCoreMask(Handle handle, u32* mask, u64* unknown) {
-    NGLOG_WARNING(Kernel_SVC, "(STUBBED) called, handle=0x{:08X}", handle);
-    *mask = 0x0;
-    *unknown = 0xf;
+static ResultCode GetThreadCoreMask(Handle thread_handle, u32* core, u64* mask) {
+    NGLOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle);
+
+    const SharedPtr<Thread> thread = g_handle_table.Get<Thread>(thread_handle);
+    if (!thread) {
+        return ERR_INVALID_HANDLE;
+    }
+
+    *core = thread->ideal_core;
+    *mask = thread->affinity_mask;
+
     return RESULT_SUCCESS;
 }
 
-static ResultCode SetThreadCoreMask(Handle handle, u32 mask, u64 unknown) {
-    NGLOG_WARNING(Kernel_SVC, "(STUBBED) called, handle=0x{:08X}, mask=0x{:08X}, unknown=0x{:X}",
-                  handle, mask, unknown);
+static ResultCode SetThreadCoreMask(Handle thread_handle, u32 core, u64 mask) {
+    NGLOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}, mask=0x{:08X}, core=0x{:X}", thread_handle,
+                mask, core);
+
+    const SharedPtr<Thread> thread = g_handle_table.Get<Thread>(thread_handle);
+    if (!thread) {
+        return ERR_INVALID_HANDLE;
+    }
+
+    thread->ChangeCore(core, mask);
+
     return RESULT_SUCCESS;
 }
 
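Note: in these SVCs `core` is the thread's ideal (preferred) core and `mask` is an affinity bitmask with one bit per core; a newly created thread defaults to ideal_core = processor_id and affinity_mask = 1ULL << processor_id (see the thread.cpp hunks further down). For example, a mask of 0b0011 allows cores 0 and 1 only. A tiny self-contained check of that bit convention, written for illustration rather than taken from the commit:

#include <cassert>
#include <cstdint>

// Affinity mask convention used by GetThreadCoreMask/SetThreadCoreMask:
// bit N set <=> the thread may be scheduled on core N.
constexpr bool MayRunOnCore(std::uint64_t affinity_mask, unsigned core) {
    return (affinity_mask & (1ULL << core)) != 0;
}

int main() {
    const std::uint64_t default_mask = 1ULL << 2; // thread created on core 2
    assert(MayRunOnCore(default_mask, 2));
    assert(!MayRunOnCore(default_mask, 0));

    const std::uint64_t mask = 0b0011; // cores 0 and 1 allowed
    assert(MayRunOnCore(mask, 0) && MayRunOnCore(mask, 1) && !MayRunOnCore(mask, 3));
}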
@@ -13,14 +13,14 @@
 
 namespace Kernel {
 
-#define PARAM(n) Core::CPU().GetReg(n)
+#define PARAM(n) Core::CurrentArmInterface().GetReg(n)
 
 /**
  * HLE a function return from the current ARM userland process
  * @param res Result to return
  */
 static inline void FuncReturn(u64 res) {
-    Core::CPU().SetReg(0, res);
+    Core::CurrentArmInterface().SetReg(0, res);
 }
 
 ////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -45,7 +45,7 @@ template <ResultCode func(u32*, u32)>
 void SvcWrap() {
     u32 param_1 = 0;
     u32 retval = func(&param_1, (u32)PARAM(1)).raw;
-    Core::CPU().SetReg(1, param_1);
+    Core::CurrentArmInterface().SetReg(1, param_1);
     FuncReturn(retval);
 }
 
@@ -53,7 +53,7 @@ template <ResultCode func(u32*, u64)>
 void SvcWrap() {
     u32 param_1 = 0;
     u32 retval = func(&param_1, PARAM(1)).raw;
-    Core::CPU().SetReg(1, param_1);
+    Core::CurrentArmInterface().SetReg(1, param_1);
     FuncReturn(retval);
 }
 
@@ -66,7 +66,7 @@ template <ResultCode func(u64*, u64)>
 void SvcWrap() {
     u64 param_1 = 0;
     u32 retval = func(&param_1, PARAM(1)).raw;
-    Core::CPU().SetReg(1, param_1);
+    Core::CurrentArmInterface().SetReg(1, param_1);
     FuncReturn(retval);
 }
 
@@ -85,8 +85,8 @@ void SvcWrap() {
     u32 param_1 = 0;
     u64 param_2 = 0;
     ResultCode retval = func((u32)(PARAM(2) & 0xFFFFFFFF), &param_1, &param_2);
-    Core::CPU().SetReg(1, param_1);
-    Core::CPU().SetReg(2, param_2);
+    Core::CurrentArmInterface().SetReg(1, param_1);
+    Core::CurrentArmInterface().SetReg(2, param_2);
     FuncReturn(retval.raw);
 }
 
@@ -120,7 +120,7 @@ template <ResultCode func(u32*, u64, u64, s64)>
 void SvcWrap() {
     u32 param_1 = 0;
     ResultCode retval = func(&param_1, PARAM(1), (u32)(PARAM(2) & 0xFFFFFFFF), (s64)PARAM(3));
-    Core::CPU().SetReg(1, param_1);
+    Core::CurrentArmInterface().SetReg(1, param_1);
     FuncReturn(retval.raw);
 }
 
@@ -133,7 +133,7 @@ template <ResultCode func(u64*, u64, u64, u64)>
 void SvcWrap() {
     u64 param_1 = 0;
     u32 retval = func(&param_1, PARAM(1), PARAM(2), PARAM(3)).raw;
-    Core::CPU().SetReg(1, param_1);
+    Core::CurrentArmInterface().SetReg(1, param_1);
     FuncReturn(retval);
 }
 
@@ -143,7 +143,7 @@ void SvcWrap() {
     u32 retval =
         func(&param_1, PARAM(1), PARAM(2), PARAM(3), (u32)PARAM(4), (s32)(PARAM(5) & 0xFFFFFFFF))
             .raw;
-    Core::CPU().SetReg(1, param_1);
+    Core::CurrentArmInterface().SetReg(1, param_1);
     FuncReturn(retval);
 }
 
@@ -166,7 +166,7 @@ template <ResultCode func(u32*, u64, u64, u32)>
 void SvcWrap() {
     u32 param_1 = 0;
     u32 retval = func(&param_1, PARAM(1), PARAM(2), (u32)(PARAM(3) & 0xFFFFFFFF)).raw;
-    Core::CPU().SetReg(1, param_1);
+    Core::CurrentArmInterface().SetReg(1, param_1);
     FuncReturn(retval);
 }
 
@@ -175,7 +175,7 @@ void SvcWrap() {
     u32 param_1 = 0;
     u32 retval =
         func(&param_1, PARAM(1), (u32)(PARAM(2) & 0xFFFFFFFF), (u32)(PARAM(3) & 0xFFFFFFFF)).raw;
-    Core::CPU().SetReg(1, param_1);
+    Core::CurrentArmInterface().SetReg(1, param_1);
     FuncReturn(retval);
 }
 
@@ -64,7 +64,7 @@ void Thread::Stop() {
     // Clean up thread from ready queue
     // This is only needed when the thread is termintated forcefully (SVC TerminateProcess)
     if (status == THREADSTATUS_READY) {
-        Core::System::GetInstance().Scheduler().UnscheduleThread(this, current_priority);
+        scheduler->UnscheduleThread(this, current_priority);
     }
 
     status = THREADSTATUS_DEAD;
@@ -92,7 +92,7 @@ void WaitCurrentThread_Sleep() {
 void ExitCurrentThread() {
     Thread* thread = GetCurrentThread();
     thread->Stop();
-    Core::System::GetInstance().Scheduler().RemoveThread(thread);
+    Core::System::GetInstance().CurrentScheduler().RemoveThread(thread);
 }
 
 /**
@@ -154,6 +154,18 @@ void Thread::CancelWakeupTimer() {
     CoreTiming::UnscheduleEvent(ThreadWakeupEventType, callback_handle);
 }
 
+static boost::optional<s32> GetNextProcessorId(u64 mask) {
+    for (s32 index = 0; index < Core::NUM_CPU_CORES; ++index) {
+        if (mask & (1ULL << index)) {
+            if (!Core::System().GetInstance().Scheduler(index)->GetCurrentThread()) {
+                // Core is enabled and not running any threads, use this one
+                return index;
+            }
+        }
+    }
+    return {};
+}
+
 void Thread::ResumeFromWait() {
     ASSERT_MSG(wait_objects.empty(), "Thread is waking up while waiting for objects");
 
@@ -188,8 +200,37 @@ void Thread::ResumeFromWait() {
     wakeup_callback = nullptr;
 
     status = THREADSTATUS_READY;
-    Core::System::GetInstance().Scheduler().ScheduleThread(this, current_priority);
-    Core::System::GetInstance().PrepareReschedule();
+
+    boost::optional<s32> new_processor_id = GetNextProcessorId(affinity_mask);
+    if (!new_processor_id) {
+        new_processor_id = processor_id;
+    }
+    if (ideal_core != -1 &&
+        Core::System().GetInstance().Scheduler(ideal_core)->GetCurrentThread() == nullptr) {
+        new_processor_id = ideal_core;
+    }
+
+    ASSERT(*new_processor_id < 4);
+
+    // Add thread to new core's scheduler
+    auto& next_scheduler = Core::System().GetInstance().Scheduler(*new_processor_id);
+
+    if (*new_processor_id != processor_id) {
+        // Remove thread from previous core's scheduler
+        scheduler->RemoveThread(this);
+        next_scheduler->AddThread(this, current_priority);
+    }
+
+    processor_id = *new_processor_id;
+
+    // If the thread was ready, unschedule from the previous core and schedule on the new core
+    scheduler->UnscheduleThread(this, current_priority);
+    next_scheduler->ScheduleThread(this, current_priority);
+
+    // Change thread's scheduler
+    scheduler = next_scheduler;
+
+    Core::System::GetInstance().CpuCore(processor_id).PrepareReschedule();
 }
 
 /**
@@ -259,8 +300,6 @@ ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point,
 
     SharedPtr<Thread> thread(new Thread);
 
-    Core::System::GetInstance().Scheduler().AddThread(thread, priority);
-
     thread->thread_id = NewThreadId();
     thread->status = THREADSTATUS_DORMANT;
     thread->entry_point = entry_point;
@@ -268,6 +307,8 @@ ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point,
     thread->nominal_priority = thread->current_priority = priority;
     thread->last_running_ticks = CoreTiming::GetTicks();
     thread->processor_id = processor_id;
+    thread->ideal_core = processor_id;
+    thread->affinity_mask = 1ULL << processor_id;
     thread->wait_objects.clear();
     thread->mutex_wait_address = 0;
     thread->condvar_wait_address = 0;
@@ -275,6 +316,8 @@ ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point,
     thread->name = std::move(name);
     thread->callback_handle = wakeup_callback_handle_table.Create(thread).Unwrap();
     thread->owner_process = owner_process;
+    thread->scheduler = Core::System().GetInstance().Scheduler(processor_id);
+    thread->scheduler->AddThread(thread, priority);
 
     // Find the next available TLS index, and mark it as used
     auto& tls_slots = owner_process->tls_slots;
@@ -337,7 +380,7 @@ void Thread::SetPriority(u32 priority) {
 }
 
 void Thread::BoostPriority(u32 priority) {
-    Core::System::GetInstance().Scheduler().SetThreadPriority(this, priority);
+    scheduler->SetThreadPriority(this, priority);
     current_priority = priority;
 }
 
@@ -406,7 +449,7 @@ void Thread::UpdatePriority() {
     if (new_priority == current_priority)
         return;
 
-    Core::System::GetInstance().Scheduler().SetThreadPriority(this, new_priority);
+    scheduler->SetThreadPriority(this, new_priority);
 
     current_priority = new_priority;
 
@@ -415,13 +458,54 @@ void Thread::UpdatePriority() {
         lock_owner->UpdatePriority();
 }
 
+void Thread::ChangeCore(u32 core, u64 mask) {
+    ideal_core = core;
+    mask = mask;
+
+    if (status != THREADSTATUS_READY) {
+        return;
+    }
+
+    boost::optional<s32> new_processor_id{GetNextProcessorId(mask)};
+
+    if (!new_processor_id) {
+        new_processor_id = processor_id;
+    }
+    if (ideal_core != -1 &&
+        Core::System().GetInstance().Scheduler(ideal_core)->GetCurrentThread() == nullptr) {
+        new_processor_id = ideal_core;
+    }
+
+    ASSERT(new_processor_id < 4);
+
+    // Add thread to new core's scheduler
+    auto& next_scheduler = Core::System().GetInstance().Scheduler(*new_processor_id);
+
+    if (*new_processor_id != processor_id) {
+        // Remove thread from previous core's scheduler
+        scheduler->RemoveThread(this);
+        next_scheduler->AddThread(this, current_priority);
+    }
+
+    processor_id = *new_processor_id;
+
+    // If the thread was ready, unschedule from the previous core and schedule on the new core
+    scheduler->UnscheduleThread(this, current_priority);
+    next_scheduler->ScheduleThread(this, current_priority);
+
+    // Change thread's scheduler
+    scheduler = next_scheduler;
+
+    Core::System::GetInstance().CpuCore(processor_id).PrepareReschedule();
+}
+
 ////////////////////////////////////////////////////////////////////////////////////////////////////
 
 /**
  * Gets the current thread
  */
 Thread* GetCurrentThread() {
-    return Core::System::GetInstance().Scheduler().GetCurrentThread();
+    return Core::System::GetInstance().CurrentScheduler().GetCurrentThread();
 }
 
 void ThreadingInit() {
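Note: both ResumeFromWait() and ChangeCore() pick a destination core the same way: GetNextProcessorId() scans the affinity mask from core 0 upward and returns the first allowed core whose scheduler currently has no running thread, falling back to the previous core (or to the ideal core, when that one is idle) if nothing matches; the thread is then moved between the two per-core schedulers. The sketch below reproduces that selection rule in isolation, with a plain is_core_idle callback standing in for the per-core scheduler query; it is illustrative, not the commit's code.

#include <cassert>
#include <cstdint>
#include <functional>
#include <optional>

constexpr int kNumCores = 4;

// First core allowed by `mask` that is currently idle, scanning from core 0 up.
std::optional<int> NextProcessorId(std::uint64_t mask,
                                   const std::function<bool(int)>& is_core_idle) {
    for (int core = 0; core < kNumCores; ++core)
        if ((mask & (1ULL << core)) && is_core_idle(core))
            return core;
    return std::nullopt;
}

int main() {
    // Cores 1 and 3 allowed; only core 3 is idle, so the thread migrates to core 3.
    auto idle = [](int core) { return core == 3; };
    assert(NextProcessorId(0b1010, idle) == 3);

    // Nothing allowed is idle, so the caller falls back to the thread's current core.
    auto busy = [](int) { return false; };
    assert(!NextProcessorId(0b1010, busy).has_value());
}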
@@ -4,6 +4,7 @@
 
 #pragma once
 
+#include <memory>
 #include <string>
 #include <unordered_map>
 #include <vector>
@@ -56,6 +57,7 @@ enum class ThreadWakeupReason {
 namespace Kernel {
 
 class Process;
+class Scheduler;
 
 class Thread final : public WaitObject {
 public:
|
||||||
/// Recalculates the current priority taking into account priority inheritance.
|
/// Recalculates the current priority taking into account priority inheritance.
|
||||||
void UpdatePriority();
|
void UpdatePriority();
|
||||||
|
|
||||||
|
/// Changes the core that the thread is running or scheduled to run on.
|
||||||
|
void ChangeCore(u32 core, u64 mask);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Gets the thread's thread ID
|
* Gets the thread's thread ID
|
||||||
* @return The thread's ID
|
* @return The thread's ID
|
||||||
|
@ -240,6 +245,11 @@ public:
|
||||||
// available. In case of a timeout, the object will be nullptr.
|
// available. In case of a timeout, the object will be nullptr.
|
||||||
std::function<WakeupCallback> wakeup_callback;
|
std::function<WakeupCallback> wakeup_callback;
|
||||||
|
|
||||||
|
std::shared_ptr<Scheduler> scheduler;
|
||||||
|
|
||||||
|
u32 ideal_core{0xFFFFFFFF};
|
||||||
|
u64 affinity_mask{0x1};
|
||||||
|
|
||||||
private:
|
private:
|
||||||
Thread();
|
Thread();
|
||||||
~Thread() override;
|
~Thread() override;
|
||||||
|
|
|
@@ -104,8 +104,15 @@ ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
     VirtualMemoryArea& final_vma = vma_handle->second;
     ASSERT(final_vma.size == size);

-    Core::CPU().MapBackingMemory(target, size, block->data() + offset,
-                                 VMAPermission::ReadWriteExecute);
+    auto& system = Core::System::GetInstance();
+    system.ArmInterface(0).MapBackingMemory(target, size, block->data() + offset,
+                                            VMAPermission::ReadWriteExecute);
+    system.ArmInterface(1).MapBackingMemory(target, size, block->data() + offset,
+                                            VMAPermission::ReadWriteExecute);
+    system.ArmInterface(2).MapBackingMemory(target, size, block->data() + offset,
+                                            VMAPermission::ReadWriteExecute);
+    system.ArmInterface(3).MapBackingMemory(target, size, block->data() + offset,
+                                            VMAPermission::ReadWriteExecute);

     final_vma.type = VMAType::AllocatedMemoryBlock;
     final_vma.permissions = VMAPermission::ReadWrite;
@@ -126,7 +133,11 @@ ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8* me
     VirtualMemoryArea& final_vma = vma_handle->second;
     ASSERT(final_vma.size == size);

-    Core::CPU().MapBackingMemory(target, size, memory, VMAPermission::ReadWriteExecute);
+    auto& system = Core::System::GetInstance();
+    system.ArmInterface(0).MapBackingMemory(target, size, memory, VMAPermission::ReadWriteExecute);
+    system.ArmInterface(1).MapBackingMemory(target, size, memory, VMAPermission::ReadWriteExecute);
+    system.ArmInterface(2).MapBackingMemory(target, size, memory, VMAPermission::ReadWriteExecute);
+    system.ArmInterface(3).MapBackingMemory(target, size, memory, VMAPermission::ReadWriteExecute);

     final_vma.type = VMAType::BackingMemory;
     final_vma.permissions = VMAPermission::ReadWrite;
@@ -184,7 +195,11 @@ ResultCode VMManager::UnmapRange(VAddr target, u64 size) {

     ASSERT(FindVMA(target)->second.size >= size);

-    Core::CPU().UnmapMemory(target, size);
+    auto& system = Core::System::GetInstance();
+    system.ArmInterface(0).UnmapMemory(target, size);
+    system.ArmInterface(1).UnmapMemory(target, size);
+    system.ArmInterface(2).UnmapMemory(target, size);
+    system.ArmInterface(3).UnmapMemory(target, size);

     return RESULT_SUCCESS;
 }
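Note: the three VMManager hunks above fan the same call out to ArmInterface(0) through ArmInterface(3) by hand. The same effect could be expressed as a loop over the core count; the helper and constant below are illustrative only and assume four emulated cores as used throughout this PR:

    // Illustrative fan-out helper (not part of this PR).
    constexpr std::size_t NUM_CPU_CORES = 4;

    template <typename Fn>
    static void ForEachArmInterface(Core::System& system, Fn&& fn) {
        for (std::size_t core = 0; core < NUM_CPU_CORES; ++core) {
            fn(system.ArmInterface(core));
        }
    }

    // Example use, mirroring VMManager::UnmapRange:
    //   ForEachArmInterface(Core::System::GetInstance(),
    //                       [&](auto& arm) { arm.UnmapMemory(target, size); });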
@@ -28,8 +28,13 @@ static PageTable* current_page_table = nullptr;

 void SetCurrentPageTable(PageTable* page_table) {
     current_page_table = page_table;
-    if (Core::System::GetInstance().IsPoweredOn()) {
-        Core::CPU().PageTableChanged();
+
+    auto& system = Core::System::GetInstance();
+    if (system.IsPoweredOn()) {
+        system.ArmInterface(0).PageTableChanged();
+        system.ArmInterface(1).PageTableChanged();
+        system.ArmInterface(2).PageTableChanged();
+        system.ArmInterface(3).PageTableChanged();
     }
 }
@@ -121,6 +121,7 @@ struct Values {
     // Core
     bool use_cpu_jit;
+    bool use_multi_core;

     // Data Storage
     bool use_virtual_sd;
@@ -155,6 +155,8 @@ TelemetrySession::TelemetrySession() {

     // Log user configuration information
     AddField(Telemetry::FieldType::UserConfig, "Core_UseCpuJit", Settings::values.use_cpu_jit);
+    AddField(Telemetry::FieldType::UserConfig, "Core_UseMultiCore",
+             Settings::values.use_multi_core);
     AddField(Telemetry::FieldType::UserConfig, "Renderer_ResolutionFactor",
              Settings::values.resolution_factor);
     AddField(Telemetry::FieldType::UserConfig, "Renderer_ToggleFramelimit",
@@ -78,6 +78,7 @@ void Config::ReadValues() {
     qt_config->beginGroup("Core");
     Settings::values.use_cpu_jit = qt_config->value("use_cpu_jit", true).toBool();
+    Settings::values.use_multi_core = qt_config->value("use_multi_core", false).toBool();
     qt_config->endGroup();

     qt_config->beginGroup("Renderer");
@@ -177,6 +178,7 @@ void Config::SaveValues() {
     qt_config->beginGroup("Core");
     qt_config->setValue("use_cpu_jit", Settings::values.use_cpu_jit);
+    qt_config->setValue("use_multi_core", Settings::values.use_multi_core);
     qt_config->endGroup();

     qt_config->beginGroup("Renderer");
@@ -20,6 +20,7 @@ ConfigureGeneral::ConfigureGeneral(QWidget* parent)
     this->setConfiguration();

     ui->use_cpu_jit->setEnabled(!Core::System::GetInstance().IsPoweredOn());
+    ui->use_multi_core->setEnabled(!Core::System::GetInstance().IsPoweredOn());
     ui->use_docked_mode->setEnabled(!Core::System::GetInstance().IsPoweredOn());
 }

@@ -30,6 +31,7 @@ void ConfigureGeneral::setConfiguration() {
     ui->toggle_check_exit->setChecked(UISettings::values.confirm_before_closing);
     ui->theme_combobox->setCurrentIndex(ui->theme_combobox->findData(UISettings::values.theme));
     ui->use_cpu_jit->setChecked(Settings::values.use_cpu_jit);
+    ui->use_multi_core->setChecked(Settings::values.use_multi_core);
     ui->use_docked_mode->setChecked(Settings::values.use_docked_mode);
 }

@@ -40,6 +42,7 @@ void ConfigureGeneral::applyConfiguration() {
         ui->theme_combobox->itemData(ui->theme_combobox->currentIndex()).toString();

     Settings::values.use_cpu_jit = ui->use_cpu_jit->isChecked();
+    Settings::values.use_multi_core = ui->use_multi_core->isChecked();
     Settings::values.use_docked_mode = ui->use_docked_mode->isChecked();
     Settings::Apply();
 }
@@ -58,6 +58,13 @@
         </property>
        </widget>
       </item>
+      <item>
+       <widget class="QCheckBox" name="use_multi_core">
+        <property name="text">
+         <string>Enable multi-core</string>
+        </property>
+       </widget>
+      </item>
      </layout>
    </item>
  </layout>
@@ -63,7 +63,7 @@ void RegistersWidget::OnDebugModeEntered() {

     for (int i = 0; i < core_registers->childCount(); ++i)
         core_registers->child(i)->setText(
-            1, QString("0x%1").arg(Core::CPU().GetReg(i), 8, 16, QLatin1Char('0')));
+            1, QString("0x%1").arg(Core::CurrentArmInterface().GetReg(i), 8, 16, QLatin1Char('0')));

     UpdateCPSRValues();
 }

@@ -122,7 +122,7 @@ void RegistersWidget::CreateCPSRChildren() {
 }

 void RegistersWidget::UpdateCPSRValues() {
-    const u32 cpsr_val = Core::CPU().GetCPSR();
+    const u32 cpsr_val = Core::CurrentArmInterface().GetCPSR();

     cpsr->setText(1, QString("0x%1").arg(cpsr_val, 8, 16, QLatin1Char('0')));
     cpsr->child(0)->setText(
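Note: the debugger widgets now call Core::CurrentArmInterface() instead of the removed Core::CPU(). Its exact definition is not shown in this diff; a plausible shape, assumed here, is a thin forwarder to the System singleton:

    // Assumed wrapper; forwards to the per-thread core resolution in System.
    ARM_Interface& CurrentArmInterface() {
        return System::GetInstance().CurrentArmInterface();
    }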
@@ -51,13 +51,21 @@ std::size_t WaitTreeItem::Row() const {
 }

 std::vector<std::unique_ptr<WaitTreeThread>> WaitTreeItem::MakeThreadItemList() {
-    const auto& threads = Core::System::GetInstance().Scheduler().GetThreadList();
     std::vector<std::unique_ptr<WaitTreeThread>> item_list;
-    item_list.reserve(threads.size());
-    for (std::size_t i = 0; i < threads.size(); ++i) {
-        item_list.push_back(std::make_unique<WaitTreeThread>(*threads[i]));
-        item_list.back()->row = i;
-    }
+    std::size_t row = 0;
+
+    auto add_threads = [&](const std::vector<Kernel::SharedPtr<Kernel::Thread>>& threads) {
+        for (std::size_t i = 0; i < threads.size(); ++i) {
+            item_list.push_back(std::make_unique<WaitTreeThread>(*threads[i]));
+            item_list.back()->row = row;
+            ++row;
+        }
+    };
+
+    add_threads(Core::System::GetInstance().Scheduler(0)->GetThreadList());
+    add_threads(Core::System::GetInstance().Scheduler(1)->GetThreadList());
+    add_threads(Core::System::GetInstance().Scheduler(2)->GetThreadList());
+    add_threads(Core::System::GetInstance().Scheduler(3)->GetThreadList());
+
     return item_list;
 }
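Note: MakeThreadItemList() now collects threads from all four per-core schedulers through the add_threads lambda. The four explicit calls are equivalent to a loop over the core index, assuming the same Scheduler(index) accessor:

    // Loop form of the four add_threads calls above (sketch).
    for (std::size_t core = 0; core < 4; ++core) {
        add_threads(Core::System::GetInstance().Scheduler(core)->GetThreadList());
    }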
@@ -240,6 +248,9 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
     }

     list.push_back(std::make_unique<WaitTreeText>(tr("processor = %1").arg(processor)));
+    list.push_back(std::make_unique<WaitTreeText>(tr("ideal core = %1").arg(thread.ideal_core)));
+    list.push_back(
+        std::make_unique<WaitTreeText>(tr("affinity mask = %1").arg(thread.affinity_mask)));
     list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadId())));
     list.push_back(std::make_unique<WaitTreeText>(tr("priority = %1(current) / %2(normal)")
                                                       .arg(thread.current_priority)
@@ -91,6 +91,7 @@ void Config::ReadValues() {
     // Core
     Settings::values.use_cpu_jit = sdl2_config->GetBoolean("Core", "use_cpu_jit", true);
+    Settings::values.use_multi_core = sdl2_config->GetBoolean("Core", "use_multi_core", false);

     // Renderer
     Settings::values.resolution_factor =
@@ -80,6 +80,10 @@ touch_device=
 # 0: Interpreter (slow), 1 (default): JIT (fast)
 use_cpu_jit =

+# Whether to use multi-core for CPU emulation
+# 0 (default): Disabled, 1: Enabled
+use_multi_core=
+
 [Renderer]
 # Whether to use software or hardware rendering.
 # 0: Software, 1 (default): Hardware