kernel/vm_manager: Use const where applicable
Makes our immutable state explicit.
commit c4e0c3d76c
parent ce5ad45278
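
For readers unfamiliar with the idiom the commit applies: locals, references, and iterators that are never reassigned gain a const qualifier, and getters that do not modify the manager become const member functions. Below is a minimal, self-contained sketch of the same pattern; the VMManagerLike class and its members are hypothetical stand-ins, not code from yuzu.

// A minimal sketch of the const-correctness idiom this commit applies.
// VMManagerLike and its members are hypothetical; they are not yuzu code.
#include <cstdint>
#include <map>

class VMManagerLike {
public:
    // const member function: querying usage never mutates the manager,
    // so callers holding only a const reference may still use it.
    std::uint64_t GetTotalMemoryUsage() const {
        return total_memory_usage;
    }

    void MapRegion(std::uint64_t base, std::uint64_t size) {
        // A local computed once and never reassigned is declared const,
        // making the immutable state explicit to readers and the compiler.
        const std::uint64_t end = base + size;
        regions.emplace(base, end);
        total_memory_usage += size;
    }

private:
    std::map<std::uint64_t, std::uint64_t> regions;
    std::uint64_t total_memory_usage = 0;
};

int main() {
    VMManagerLike manager;
    manager.MapRegion(0x8000000, 0x1000);

    // A const reference (like the one taken in the GetInfo hunk below)
    // can call the const-qualified getter but not the mutating MapRegion().
    const VMManagerLike& view = manager;
    return view.GetTotalMemoryUsage() == 0x1000 ? 0 : 1;
}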
@@ -267,7 +267,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
     LOG_TRACE(Kernel_SVC, "called info_id=0x{:X}, info_sub_id=0x{:X}, handle=0x{:08X}", info_id,
               info_sub_id, handle);
 
-    auto& vm_manager = Core::CurrentProcess()->vm_manager;
+    const auto& vm_manager = Core::CurrentProcess()->vm_manager;
 
     switch (static_cast<GetInfoType>(info_id)) {
     case GetInfoType::AllowedCpuIdBitmask:
@@ -175,9 +175,9 @@ VMManager::VMAIter VMManager::Unmap(VMAIter vma_handle) {
 
 ResultCode VMManager::UnmapRange(VAddr target, u64 size) {
     CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size));
-    VAddr target_end = target + size;
+    const VAddr target_end = target + size;
 
-    VMAIter end = vma_map.end();
+    const VMAIter end = vma_map.end();
     // The comparison against the end of the range must be done using addresses since VMAs can be
     // merged during this process, causing invalidation of the iterators.
     while (vma != end && vma->second.base < target_end) {
@@ -207,9 +207,9 @@ VMManager::VMAHandle VMManager::Reprotect(VMAHandle vma_handle, VMAPermission ne
 
 ResultCode VMManager::ReprotectRange(VAddr target, u64 size, VMAPermission new_perms) {
     CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size));
-    VAddr target_end = target + size;
+    const VAddr target_end = target + size;
 
-    VMAIter end = vma_map.end();
+    const VMAIter end = vma_map.end();
     // The comparison against the end of the range must be done using addresses since VMAs can be
     // merged during this process, causing invalidation of the iterators.
     while (vma != end && vma->second.base < target_end) {
@@ -258,14 +258,14 @@ ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u64 size) {
         return ERR_INVALID_ADDRESS;
     }
 
-    VirtualMemoryArea& vma = vma_handle->second;
+    const VirtualMemoryArea& vma = vma_handle->second;
     if (vma.type != VMAType::Free) {
         // Region is already allocated
         return ERR_INVALID_ADDRESS_STATE;
     }
 
-    VAddr start_in_vma = base - vma.base;
-    VAddr end_in_vma = start_in_vma + size;
+    const VAddr start_in_vma = base - vma.base;
+    const VAddr end_in_vma = start_in_vma + size;
 
     if (end_in_vma > vma.size) {
         // Requested allocation doesn't fit inside VMA
@@ -288,13 +288,13 @@ ResultVal<VMManager::VMAIter> VMManager::CarveVMARange(VAddr target, u64 size) {
     ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: 0x{:016X}", size);
     ASSERT_MSG((target & Memory::PAGE_MASK) == 0, "non-page aligned base: 0x{:016X}", target);
 
-    VAddr target_end = target + size;
+    const VAddr target_end = target + size;
     ASSERT(target_end >= target);
     ASSERT(target_end <= MAX_ADDRESS);
     ASSERT(size > 0);
 
     VMAIter begin_vma = StripIterConstness(FindVMA(target));
-    VMAIter i_end = vma_map.lower_bound(target_end);
+    const VMAIter i_end = vma_map.lower_bound(target_end);
     for (auto i = begin_vma; i != i_end; ++i) {
         if (i->second.type == VMAType::Free) {
             return ERR_INVALID_ADDRESS_STATE;
@@ -346,7 +346,7 @@ VMManager::VMAIter VMManager::SplitVMA(VMAIter vma_handle, u64 offset_in_vma) {
 }
 
 VMManager::VMAIter VMManager::MergeAdjacent(VMAIter iter) {
-    VMAIter next_vma = std::next(iter);
+    const VMAIter next_vma = std::next(iter);
     if (next_vma != vma_map.end() && iter->second.CanBeMergedWith(next_vma->second)) {
         iter->second.size += next_vma->second.size;
         vma_map.erase(next_vma);
@@ -382,22 +382,22 @@ void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
     }
 }
 
-u64 VMManager::GetTotalMemoryUsage() {
+u64 VMManager::GetTotalMemoryUsage() const {
     LOG_WARNING(Kernel, "(STUBBED) called");
     return 0xF8000000;
 }
 
-u64 VMManager::GetTotalHeapUsage() {
+u64 VMManager::GetTotalHeapUsage() const {
     LOG_WARNING(Kernel, "(STUBBED) called");
     return 0x0;
 }
 
-VAddr VMManager::GetAddressSpaceBaseAddr() {
+VAddr VMManager::GetAddressSpaceBaseAddr() const {
     LOG_WARNING(Kernel, "(STUBBED) called");
     return 0x8000000;
 }
 
-u64 VMManager::GetAddressSpaceSize() {
+u64 VMManager::GetAddressSpaceSize() const {
     LOG_WARNING(Kernel, "(STUBBED) called");
     return MAX_ADDRESS;
 }
@@ -190,16 +190,16 @@ public:
     void LogLayout() const;
 
     /// Gets the total memory usage, used by svcGetInfo
-    u64 GetTotalMemoryUsage();
+    u64 GetTotalMemoryUsage() const;
 
     /// Gets the total heap usage, used by svcGetInfo
-    u64 GetTotalHeapUsage();
+    u64 GetTotalHeapUsage() const;
 
     /// Gets the total address space base address, used by svcGetInfo
-    VAddr GetAddressSpaceBaseAddr();
+    VAddr GetAddressSpaceBaseAddr() const;
 
     /// Gets the total address space address size, used by svcGetInfo
-    u64 GetAddressSpaceSize();
+    u64 GetAddressSpaceSize() const;
 
     /// Each VMManager has its own page table, which is set as the main one when the owning process
     /// is scheduled.