x86_64 Paging leads to triple fault

Question about which tools to use, bugs, the best way to implement a function, etc should go here. Don't forget to see if your question is answered in the wiki first! When in doubt post here.
Post Reply
User avatar
ashusharmadev
Posts: 2
Joined: Thu Jan 13, 2022 9:04 am
Libera.chat IRC: ashusharmadev

x86_64 Paging leads to triple fault

Post by ashusharmadev »

I know, there are much same questions, but non of those fixed my problem, the noticable thing is that when i goes to write any kernel, my paging can't complete, on all the kernels that i have created at my own, there's paging failed, one kernel is succeeded, that is tutorial.

My Paging Code:

Paging.h >

Code: Select all


#pragma once

#include <Caitra/Common/Datatypes.h>
#include <Caitra/Common/Macros.h>
#include <Caitra/Debug/Assert.h>

namespace Caitra::x86_64::Paging
{
    
    struct PML4Entry
    {
        bool Present : 1;   /* Must be 1, to reference a PML-1 */
        bool Writable : 1;  /* If 0, Write access will not permitted. */
        bool User : 1;      /* If 0, User will not permitted to access */
        bool WriteThought : 1;  /* Page-level Write Through */
        bool Cache : 1;     /* Page Level Cache Disable */
        bool Accessed : 1;  /* Indicates whether this entry has been used */
        int Zero0 : 6;      /* Ignored */
        uint64_t PhysicalAddress : 36; /* Physical address of a 4-KByte aligned PLM-1 */
        int Zero1 : 15;     /* Ignored */
        bool ExecuteDisabled : 1;   /* If IA32_EFER.NXE = 1, Execute Disable */
    } __PACKED__;

    
    struct PML4 /* : public AddressSpace */
    {
        PML4Entry entries[512];
    } __PACKED__;
    

    static inline size_t PML4Index(uintptr_t address)
    {
        return (address >> 39) & 0x1FF;
    }

    // static_assert(sizeof(PML4Entry) == sizeof(uint64_t));
    // static_assert(sizeof(PML4) == 4096);
    // assert_truth(sizeof(PML4Entry) == sizeof(uint64_t));
    // assert_truth(sizeof(PML4) == sizeof(uint64_t));

    struct PML3Entry
    {
        bool Present : 1;   /* Must be 1, to reference a PML-1 */
        bool Writable : 1;  /* If 0, Write access will not permitted. */
        bool User : 1;      /* If 0, User will not permitted to access */
        bool WriteThought : 1;  /* Page-level Write Through */
        bool Cache : 1;     /* Page Level Cache Disable */
        bool Accessed : 1;  /* Indicates whether this entry has been used */
        int Zero0 : 1;      /* Ignored */
        int Size : 1;       /* Must be 0, Otherwise this entry maps a 1-GByte Page */
        int Zero1 : 4;      /* Ignored */
        uint64_t PhysicalAddress : 36; /* Physical address of a 4-KByte aligned PLM-1 */
        int Zero2 : 15;     /* Ignored */
        bool ExecuteDisabled : 1;   /* If IA32_EFER.NXE = 1, Execute Disable */
    } __PACKED__;
    
    struct PML3
    {
        PML3Entry entries[512];
    } __PACKED__;

    static inline size_t PML3Index(uintptr_t address)
    {
        return (address >> 30) & 0x1FF;
    }

    // static_assert(sizeof(PML3Entry) == sizeof(uint64_t));
    // static_assert(sizeof(PML3) == 4096);
    // assert_truth(sizeof(PML3Entry) == sizeof(uint64_t));
    // assert_truth(sizeof(PML3) == 4096);
    
    struct PML2Entry
    {
        bool Present : 1;   /* Must be 1, to reference a PML-1 */
        bool Writable : 1;  /* If 0, Write access will not permitted. */
        bool User : 1;      /* If 0, User will not permitted to access */
        bool WriteThought : 1;  /* Page-level Write Through */
        bool Cache : 1;     /* Page Level Cache Disable */
        bool Accessed : 1;  /* Indicates whether this entry has been used */
        int Zero0 : 1;      /* Ignored */
        int Size : 1;       /* Must be 0, Otherwise this entry maps a 1-GByte Page */
        int Zero1 : 1;      /* Ignored */
        uint64_t PhysicalAddress : 36; /* Physical address of a 4-KByte aligned PLM-1 */
        int Zero2 : 15;     /* Ignored */
        bool ExecuteDisabled : 1;   /* If IA32_EFER.NXE = 1, Execute Disable */
    } __PACKED__;


    struct PML2
    {
        PML2Entry entries[512];
    } __PACKED__;

    static inline size_t PML2Index(uintptr_t address)
    {
        return (address >> 21) & 0x1FF;
    }

    
    struct PML1Entry
    {
        bool Present : 1; 
        bool Writable : 1; 
        bool User : 1;     
        bool WriteThought : 1;  
        bool Cache : 1;   
        bool Accessed : 1;  
        int Dirty : 1;    
        int MemoryType : 1; 
        int Global : 1;
        int Zero0 : 3;
        uint64_t PhysicalAddress : 36;
        int Zero1 : 10;     /* Ignored */
        bool ProtectionKey : 5; 
        bool ExecuteDisabled : 1;
    } __PACKED__;
    

    struct PML1
    {
        PML1Entry entries[512];
    } __PACKED__;

    static inline size_t PML1Index(uintptr_t address)
    {
        return (address >> 12) & 0x1FF;
    }

    PML4 *GetkPML4();
    PML4 *CreatePML4();
    
    void SwitchPML4(PML4 *pml4);

    void VirtualInitialize();
    void EnableVirtualMemory();


} // namespace Caitra::x86_64::Paging


extern "C" void LoadPD(uintptr_t directory);
extern "C" void InvalidateTLB();

Paging.cpp >

Code: Select all


#include <x86_64-pc/Memory/MemoryPaging.h>
#include <System/kApps.h>

namespace Caitra::x86_64::Paging
{

    PML4 kPML4 __attribute__((aligned(0x1000))) = {};
    PML3 kPML3 __attribute__((aligned(0x1000))) = {};
    PML2 kPML2 __attribute__((aligned(0x1000))) = {};
    PML1 kPML1[512] __attribute__((aligned(0x1000))) = {};

    extern "C" uint64_t kend;

    PML4 *GetkPML4()
    {
        return &kPML4;
    }

    // PML4 *CreatePML4()
    // {

    // }

    
    void DestroyPML4(PML4 *pml4)
    {

    }

    void SwitchPML4(PML4 *pml4)
    {
        Caitra::System::kApps::kLog[0] << "\nPML4 : " << (Void*)pml4;
        
        // asm("cli");
        
        // assert_truth((uintptr_t)pml4 > kend);
        // assert_truth((uintptr_t)pml4 < kend);

        LoadPageDirectory((uintptr_t)pml4);
    }

    void EnableVirtualMemory()
    {
        SwitchPML4(GetkPML4());
    }

    void VirtualInitialize()
    {
        assert_truth(sizeof(PML4Entry) == sizeof(uint64_t));
        
        assert_truth(sizeof(PML4) == 4096);

        assert_truth(sizeof(PML3Entry) == sizeof(uint64_t));
        assert_truth(sizeof(PML3) == 4096);

        assert_truth(sizeof(PML2Entry) == sizeof(uint64_t));
        assert_truth(sizeof(PML2) == 4096);

        assert_truth(sizeof(PML1Entry) == sizeof(uint64_t));
        assert_truth(sizeof(PML1) == 4096);

        auto &entryPML4 = kPML4.entries[0];
        entryPML4.User = 0;
        entryPML4.Writable = 1;
        entryPML4.Present = 1;
        entryPML4.PhysicalAddress = (uint64_t)&kPML3 / 0x1000;

        auto &entryPML3 = kPML3.entries[0];
        entryPML3.User = 0;
        entryPML3.Writable = 1;
        entryPML3.Present = 1;
        entryPML3.PhysicalAddress = (uint64_t)&kPML2 / 0x1000;

        for (size_t i = 0; i < 512; i++)
        {
            auto &entryPML2 = kPML2.entries[i];
            entryPML2.User = 0;
            entryPML2.Writable = 1;
            entryPML2.Present = 1;
            entryPML2.PhysicalAddress = (uint64_t)&kPML1[i] / 0x1000;
        }
        
    }



} // namespace Caitra::x86_64::Paging

Paging.asm >

Code: Select all


global LoadPageDirectory
LoadPageDirectory:
    mov cr3, rdi
    ret

When LoadingPageDirectory, Faults happen
The upper code is not my own code, it is of skift os, it is also triple faulting, and more tutorials that are i am followed all those leads to reboot.
What is problem in my kernel? Is it can be caused by GDT or IDT?
Octocontrabass
Member
Member
Posts: 5563
Joined: Mon Mar 25, 2013 7:01 pm

Re: x86_64 Paging leads to triple fault

Post by Octocontrabass »

What kind of debugging have you tried so far?

You can use "info mem" and "info tlb" in the QEMU monitor to check your page tables. You can add "-d int" to your QEMU command line to dump the CPU state when the first exception occurs, which might also be helpful.

You can use an actual debugger to step through your code and examine what it's doing before it tries to enable paging. If your page tables are getting messed up somewhere, this can help you track down exactly where.
michael
Posts: 12
Joined: Fri Nov 12, 2021 1:09 am

Re: x86_64 Paging leads to triple fault

Post by michael »

If you are working on the switch between 32 protect mode to ia-32e mode, maybe you forgot to enable PAE mode
see Intel® 64 and IA-32 architectures software developer’s manual 9.8.5 Initializing IA-32e Mode

Or you use debugger to directly check your page table
Post Reply