lindows/lcrash/mm/virt.c

#include <lcrash/mm/virt.h>
#include <lcrash/mm/kmalloc.h>
#include <lcrash/debug/debug.h>
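
// The PML4 that was active when the kernel gained control, and the kernel's own
// copy of it, built and loaded into CR3 by VmemInitialize().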
u64 (*VmemOriginalPML4)[512] = 0;
u64 (*VmemPML4)[512] = 0;
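
// Map Size bytes of physical memory at Phys to the virtual range starting at Virt.
// All three arguments must be 4 KiB aligned. The walk below is still a stub: it
// validates alignment and iterates the range but installs no page-table entries yet.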
void VmemMapMemory(void* Phys, void* Virt, u64 Size) {
    u64 PhysStart = (u64)Phys;
    u64 VirtStart = (u64)Virt;
    if ((PhysStart | VirtStart | Size) & 0xfff) Panic("VmemMapMemory: Addresses not page aligned");
    // Walk the virtual range one paging level at a time:
    // 512 GiB (PML4 entry), 1 GiB (PDPT entry), 2 MiB (PD entry), 4 KiB (PT entry).
    for (u64 Offset4 = VirtStart; Offset4 < VirtStart + Size; Offset4 += 0x8000000000) {
        for (u64 Offset3 = Offset4; Offset3 < VirtStart + Size && Offset3 < Offset4 + 0x8000000000; Offset3 += 0x40000000) {
            for (u64 Offset2 = Offset3; Offset2 < VirtStart + Size && Offset2 < Offset3 + 0x40000000; Offset2 += 0x200000) {
                for (u64 Offset1 = Offset2; Offset1 < VirtStart + Size && Offset1 < Offset2 + 0x200000; Offset1 += 0x1000) {
                    // Not implemented yet: install the entries that map the 4 KiB page
                    // at Offset1 to physical PhysStart + (Offset1 - VirtStart).
                }
            }
        }
    }
}
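
// Take ownership of the paging structures: read the PML4 currently in CR3,
// deep-copy every PDPT, PD and PT it references (large-page entries are copied
// verbatim), publish both trees through the globals above, then switch CR3 to the copy.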
void VmemInitialize() {
    // Read the paging control registers
    u64 CR3;
    asm ("mov %%cr3, %0" : "=r" (CR3));
    u64 CR4;
    asm ("mov %%cr4, %0" : "=r" (CR4));
    // CR4.LA57 is bit 12; if 5-level paging is active we cannot mirror the tables below
    if (CR4 & 0x1000) Panic("5 Level paging not supported");
    // Physical address of the currently active PML4, taken from CR3
    u64 (*OriginalPML4)[512] = (u64(*)[512])(CR3 & 0x0007fffffffff000);
    // Page-aligned 512-entry table that will become our copy
    u64 (*NewPML4)[512] = KernelHeapAlloc(sizeof(u64) * 512, 0x1000, 0);
    // Deep-copy every present paging structure into freshly allocated tables.
    for (int i = 0; i < 512; i++) {
        if ((*OriginalPML4)[i] & 1) { // present
            u64 (*OriginalPDPT)[512] = (u64(*)[512])((*OriginalPML4)[i] & 0x0007fffffffff000);
            u64 (*NewPDPT)[512] = KernelHeapAlloc(sizeof(u64) * 512, 0x1000, 0);
            for (int j = 0; j < 512; j++) {
                if ((*OriginalPDPT)[j] & 1) {
                    if ((*OriginalPDPT)[j] & 128) {
                        // 1 GiB page (PS set): no PD underneath, copy the entry verbatim
                        (*NewPDPT)[j] = (*OriginalPDPT)[j];
                    } else {
                        u64 (*OriginalPD)[512] = (u64(*)[512])((*OriginalPDPT)[j] & 0x000ffffffffff000);
                        u64 (*NewPD)[512] = KernelHeapAlloc(sizeof(u64) * 512, 0x1000, 0);
                        for (int k = 0; k < 512; k++) {
                            if ((*OriginalPD)[k] & 1) {
                                if ((*OriginalPD)[k] & 128) {
                                    // 2 MiB page (PS set): copy the entry verbatim
                                    (*NewPD)[k] = (*OriginalPD)[k];
                                } else {
                                    u64 (*OriginalPT)[512] = (u64(*)[512])((*OriginalPD)[k] & 0x000ffffffffff000);
                                    u64 (*NewPT)[512] = KernelHeapAlloc(sizeof(u64) * 512, 0x1000, 0);
                                    if ((s64)NewPT <= 0) Panic("Failed to allocate memory");
                                    for (int l = 0; l < 512; l++) {
                                        (*NewPT)[l] = (*OriginalPT)[l];
                                    }
                                    // Point the new PD entry at the copied PT, re-attaching the original flag bits
                                    NewPT = (void*)((uptr)NewPT | ((*OriginalPD)[k] & ~0x000ffffffffff000));
                                    (*NewPD)[k] = (u64)NewPT;
                                }
                            } else (*NewPD)[k] = 0;
                        }
                        // Point the new PDPT entry at the copied PD, re-attaching the original flag bits
                        NewPD = (void*)((uptr)NewPD | ((*OriginalPDPT)[j] & ~0x0007fffffffff000));
                        (*NewPDPT)[j] = (u64)NewPD;
                    }
                } else (*NewPDPT)[j] = 0;
            }
            // Point the new PML4 entry at the copied PDPT, re-attaching the original flag bits
            NewPDPT = (void*)((uptr)NewPDPT | ((*OriginalPML4)[i] & ~0x0007fffffffff000));
            (*NewPML4)[i] = (u64)NewPDPT;
        } else (*NewPML4)[i] = 0;
    }
    // Save both tables in the module-level globals
    VmemOriginalPML4 = OriginalPML4;
    VmemPML4 = NewPML4;
    // Load our new page table: XOR strips the old PML4 address out of CR3 while
    // preserving its flag bits, then the new PML4 address is ORed in. (This only
    // works if KernelHeapAlloc returns identity-mapped memory, so the pointer
    // value is also the table's physical address.)
    CR3 ^= (u64)VmemOriginalPML4;
    CR3 |= ((u64)VmemPML4) & 0x0007fffffffff000;
    asm volatile ("mov %q0, %%cr3" : : "r" (CR3));
}