If the system locks up only when you load the TR, then check the following:
if the CS, DS and SS descriptors work correctly when you load them, then the problem
is in the TSS descriptor. Keep in mind that when you define the table, you must take its
size into account.
Here is an example from my code, which works correctly with several CPUs; it is just a summary of the code:
Code: Select all
// Some definitions
// One 8-byte GDT segment descriptor, laid out exactly as the CPU decodes it
// (base and limit are scattered across the entry; see Intel SDM vol. 3,
// "Segment Descriptors"). Must stay packed — any padding breaks the layout.
struct SgdtDescriptor {
u16 limit0_15;      // limit bits 0..15
u16 base0_15;       // base bits 0..15
u8 base16_23;       // base bits 16..23
u8 dacces;          // access byte: P, DPL, S, type
u8 limit16_19: 4;   // limit bits 16..19
u8 option: 4;       // flags nibble: G, D/B, L, AVL
u8 base24_31;       // base bits 24..31
} __attribute__ ((packed)); // fixed: "__ attribute__" (stray space) is a syntax error
// Pseudo-descriptor consumed by the LGDT instruction: 16-bit limit followed
// by a 32-bit linear base address. Layout is fixed by hardware; keep packed.
struct Sgdtr {
u16 limit; // size of the GDT in bytes MINUS one (per Intel SDM) — NOTE(review): initGdt stores 8*8 without the -1; verify
u32 base; // linear address of the first GDT entry
} __attribute__ ((packed));
// 32-bit hardware Task State Segment (104 bytes, limit 0x67), field order
// fixed by the CPU (Intel SDM vol. 3, "Task State Segment"). Each 16-bit
// selector is padded to 32 bits with an *_unused half. Must stay packed.
struct Stss {
u16 ret_task, ret_task_unused; // link to previous task (backlink)
u32 esp0;                      // ring-0 stack pointer used on privilege transitions
u16 ss0, ss0_unused;           // ring-0 stack segment
u32 esp1;
u16 ss1, ss1_unused;
u32 esp2;
u16 ss2, ss2_unused;
u32 cr3;                       // page directory base for this task
u32 eip, eflags, eax, ecx, edx, ebx, esp, ebp, esi, edi;
u16 es, es_unused;             // fixed: was "is" — mistranscription of the ES selector
u16 cs, cs_unused;
u16 ss, ss_unused;
u16 ds, ds_unused;
u16 fs, fs_unused;
u16 gs, gs_unused;
u16 ldt_selector, ldt_sel_unused;
u16 debug_flag, io_map;        // T debug-trap bit; I/O permission bitmap base offset
} __attribute__ ((packed));    // fixed: "__ attribute__" (stray space) is a syntax error
// Per-CPU bookkeeping: its TSS, its private 8-entry GDT, the GDTR image
// pointing at that GDT, and the ring-0 kernel stack address.
struct Scpus {
Stss tss;                 // this CPU's task state segment
SgdtDescriptor kgdt [8];  // entries: null, KCS, KDS, KSS, UCS, UDS, USS, TSS
Sgdtr gdtr;               // pseudo-descriptor loaded with LGDT
u32 kstack;               // ring-0 stack top, copied into tss.esp0
} __attribute__ ((packed)); // fixed: "__attribute __" (stray space) is a syntax error
// Code snippet
// Scatter a (base, limit, access, flags) tuple into the split-field layout
// of one GDT entry. Only the low 20 bits of alimit and the low 4 bits of
// aoption are representable; higher bits are discarded.
void CmodeProtected::initGdtDescriptor(u32 abase, u32 alimit, u8 adacces, u8 aoption, struct SgdtDescriptor *adescriptor) {
    // Base address, split 16 / 8 / 8.
    adescriptor->base0_15 = static_cast<u16>(abase);
    adescriptor->base16_23 = static_cast<u8>(abase >> 16);
    adescriptor->base24_31 = static_cast<u8>(abase >> 24);
    // Segment limit, split 16 / 4.
    adescriptor->limit0_15 = static_cast<u16>(alimit);
    adescriptor->limit16_19 = (alimit >> 16) & 0xf;
    // Access byte and flags nibble.
    adescriptor->dacces = adacces;
    adescriptor->option = aoption & 0xf;
}
// Build this CPU's GDT and TSS, load the GDTR, and reload every segment
// register so the new descriptors take effect (far jump reloads CS).
void CmodeProtected::initGdt(Scpus *cpu) {
    // Minimal TSS setup: ring-0 stack plus debug/IO-map fields.
    cpu->tss.debug_flag = 0x00;
    cpu->tss.io_map = 0x00; // NOTE(review): 0 places the I/O bitmap at offset 0, inside the TSS; usually set >= the TSS limit — confirm
    cpu->tss.esp0 = cpu->kstack; // kernel stack used when entering ring 0
    cpu->tss.ss0 = 0x18;         // kernel stack segment selector

    initGdtDescriptor(0x0, 0x0, 0x0, 0x0, &cpu->kgdt[0]);      // 0x00: mandatory null descriptor
    initGdtDescriptor(0x0, 0xFFFFF, 0x9B, 0x0D, &cpu->kgdt[1]); // 0x08: kernel code
    initGdtDescriptor(0x0, 0xFFFFF, 0x93, 0x0D, &cpu->kgdt[2]); // 0x10: kernel data
    initGdtDescriptor(0x0, 0x00000, 0x97, 0x0D, &cpu->kgdt[3]); // 0x18: kernel stack (expand-down)
    initGdtDescriptor(0x0, 0xFFFFF, 0xFF, 0x0D, &cpu->kgdt[4]); // 0x20: user code
    initGdtDescriptor(0x0, 0xFFFFF, 0xF3, 0x0D, &cpu->kgdt[5]); // 0x28: user data
    initGdtDescriptor(0x0, 0x0, 0xF7, 0x0D, &cpu->kgdt[6]);     // 0x30: user stack (expand-down)
    initGdtDescriptor((u32) &cpu->tss, 0x67, 0xE9, 0x00, &cpu->kgdt[7]); // 0x38: TSS, limit = sizeof(Stss)-1

    // BUGFIX: LGDT expects the limit as size-in-bytes MINUS ONE (Intel SDM);
    // the original stored 8*8, which makes the GDT one byte too long and can
    // fault on the last descriptor — exactly the symptom when loading TR.
    cpu->gdtr.limit = 8 * 8 - 1;
    cpu->gdtr.base = (u32) cpu->kgdt;

    // BUGFIX: the pseudo-descriptor is an INPUT to LGDT. The original used
    // an output constraint ("=r"(ptr)), so LGDT dereferenced an
    // uninitialized register. Pass the structure directly through memory.
    asm volatile("lgdtl %0" : : "m" (cpu->gdtr));

    // Reload the data segment registers with the kernel data selector, then
    // far-jump to reload CS. BUGFIX: "%is" was a typo for "%es"; a local
    // label (1:) replaces the named label so the asm can appear twice.
    asm volatile("movw $0x10, %%ax \n\t"
                 "movw %%ax, %%ds \n\t"
                 "movw %%ax, %%es \n\t"
                 "movw %%ax, %%fs \n\t"
                 "movw %%ax, %%gs \n\t"
                 "ljmp $0x08, $1f \n\t"
                 "1: nop \n\t"
                 : : : "ax");
    // SS gets the dedicated kernel stack selector.
    asm volatile("movw $0x18, %%ax \n\t"
                 "movw %%ax, %%ss \n\t"
                 : : : "ax");
}
// Kernel entry point: set up the GDT/TSS for the boot CPU, load the task
// register, and enter the kernel proper.
int start() {
    initGdt(&cpus[0]);
    // ... further initialization elided in the original post ...
    // Load the task register with the TSS selector (GDT index 7 -> 0x38).
    asm volatile("movw $0x38, %%ax \n\t"
                 "ltr %%ax"
                 : : : "ax");
    kernel();
    return 0; // fixed: non-void function previously fell off the end (UB if the value is used)
}