So I'm posting this here because I've been hacking on this for the last few days (and asking a few people), and I'm struggling to understand what I'm doing wrong; unless I'm misreading the GDT tutorial and Global Descriptor Table articles, it doesn't look like I'm doing anything wrong. I've been given the magic numbers below for the GDT entries, but I can't seem to reproduce them (and the values in the GDT tutorial article don't appear to match those magic numbers).
Here's my problem: loading the GDT works fine, and no exception occurs when LGDT is executed. However, when I do the far return (LRETQ) to reload CS, I get a #GP. I'm doing this in Zig (I've done this in Rust before; I started playing with Zig and thought, hey, why not see how far I can get?). To make completely sure that nothing gets mangled across the Zig/assembly FFI boundary, I've used an assembly stub for the actual GDT loading:
Code: Select all
.section .data
gdtr:
    .word 0                 # GDTR limit
    .quad 0                 # GDTR base

.section .text
.global load_gdt
.type load_gdt, @function
.align 8
load_gdt:
    pushq %rbp
    movq %rsp, %rbp
    subq $32, %rsp
    movw %di, gdtr+0        # first argument: limit
    movq %rsi, gdtr+2       # second argument: base
    lgdtq (gdtr)
    pushq $0x08             # kernel code selector
    leaq reload_segment_regs(%rip), %rax
    pushq %rax
    lretq                   # far return to reload CS
reload_segment_regs:
    movw $0x10, %ax         # kernel data selector
    movw %ax, %ds
    movw %ax, %es
    movw %ax, %fs
    movw %ax, %gs
    movw %ax, %ss
    movq %rbp, %rsp
    popq %rbp
    retq
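The stub is declared on the Zig side essentially as follows (paraphrased here; the limit goes in the first argument and the base address in the second):

Code: Select all
extern fn load_gdt(limit: u16, base: u64) callconv(.C) void;

And here are the relevant Zig definitions plus the GDT setup itself: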
Code: Select all
const GdtEntry = packed struct {
limit_lo: u16 = 0,
base_lo: u16 = 0,
base_mid: u8 = 0,
access: AccessByte = .{ .raw = 0x00 },
limit_hi: u4 = 0,
flags: Flags = .{ .raw = 0x0 },
base_hi: u8 = 0,
pub fn asU64(self: *GdtEntry) u64 {
return mem.readIntNative(u64, &mem.toBytes(self.*));
}
};
comptime {
if (@sizeOf(GdtEntry) != 8 or @bitSizeOf(GdtEntry) != 64) @compileError("GdtEntry must be 8 bytes!");
if (@bitOffsetOf(GdtEntry, "limit_lo") != 0) @compileError("Limit lo must be at bit offset 0!");
if (@bitOffsetOf(GdtEntry, "base_lo") == 15) @compileError("base_lo must be at bit offset 16!");
if (@bitOffsetOf(GdtEntry, "base_mid") != 32) @compileError("base_mid must be at bit offset 32!");
if (@bitOffsetOf(GdtEntry, "access") != 40) @compileError("access byte must be at bit offset 40!");
if (@bitOffsetOf(GdtEntry, "limit_hi") != 48) @compileError("limit_hi must be at bit offset 48!");
if (@bitOffsetOf(GdtEntry, "flags") != 52) @compileError("flags must be a bit offset 52!");
if (@bitOffsetOf(GdtEntry, "base_hi") != 56) @compileError("base_hi must be at bit offset 56!");
}
const AccessByte = packed union {
user_segment: packed struct {
accessed: bool,
read_write: bool,
direction_conforming: bool,
executable: bool,
system: bool,
dpl: u2,
present: bool,
},
system_segment: packed struct {
segment_type: u4,
system: bool,
dpl: u2,
present: bool,
},
raw: u8,
};
const Flags = packed union {
fields: packed struct {
reserved: bool,
long_mode: bool,
pm_segment: bool,
granularity: bool,
},
raw: u4,
};
var gdt = [_]GdtEntry{.{}} ** 8;
...
const TssDescriptor = packed struct {
reserved1: u32 = 0,
rsp0: u64,
rsp1: u64,
rsp2: u64,
reserved2: u64 = 0,
ist1: u64,
ist2: u64,
ist3: u64,
ist4: u64,
ist5: u64,
ist6: u64,
ist7: u64,
reserved3: u32 = 0,
reserved4: u32 = 0,
reserved5: u8 = 0,
iopb: u16,
};
comptime {
if (@sizeOf(TssDescriptor) != 104) @compileError("TSS descriptor must be 104 bytes in size");
}
var tss: TssDescriptor = undefined;
// In init function
// 64-bit kernel code
gdt[1] = .{
.limit_lo = 0xFFFF,
.limit_hi = 0xF,
.access = .{
.user_segment = .{
.accessed = false,
.read_write = true,
.direction_conforming = false,
.executable = true,
.system = true,
.dpl = 0,
.present = true,
},
},
.flags = .{
.fields = .{
.reserved = false,
.long_mode = true,
.pm_segment = false,
.granularity = true,
},
},
};
// 64-bit kernel data
gdt[2] = .{
.limit_lo = 0xFFFF,
.limit_hi = 0xF,
.access = .{
.user_segment = .{
.accessed = false,
.read_write = true,
.direction_conforming = false,
.executable = false,
.system = true,
.dpl = 0,
.present = true,
},
},
.flags = .{
.fields = .{
.reserved = false,
.long_mode = false,
.pm_segment = true,
.granularity = true,
},
},
};
// 64-bit user code
gdt[3] = .{
.limit_lo = 0xFFFF,
.limit_hi = 0xF,
.access = .{
.user_segment = .{
.accessed = false,
.read_write = true,
.direction_conforming = false,
.executable = true,
.system = true,
.dpl = 3,
.present = true,
},
},
.flags = .{
.fields = .{
.reserved = false,
.long_mode = true,
.pm_segment = false,
.granularity = true,
},
},
};
// 64-bit user data
gdt[4] = .{
.limit_lo = 0xFFFF,
.limit_hi = 0xF,
.access = .{
.user_segment = .{
.accessed = false,
.read_write = true,
.direction_conforming = false,
.executable = false,
.system = true,
.dpl = 3,
.present = true,
},
},
.flags = .{
.fields = .{
.reserved = false,
.long_mode = false,
.pm_segment = true,
.granularity = true,
},
},
};
// ... Code for setting up the TSS...
const tss_base = @intFromPtr(&tss);
const tss_limit = @sizeOf(TssDescriptor) - 1;
gdt[5] = .{
.base_lo = @truncate(tss_base >> 0),
.base_mid = @truncate(tss_base >> 16),
.base_hi = @truncate(tss_base >> 24),
.limit_lo = @truncate(tss_limit >> 0),
.limit_hi = @truncate(tss_limit >> 16),
.access = .{
.system_segment = .{
.segment_type = 0x9,
.system = false,
.dpl = 0,
.present = true,
},
},
};
gdt[6] = .{
.base_lo = @truncate(tss_base >> 32),
.base_mid = @truncate(tss_base >> 48),
.base_hi = @truncate(tss_base >> 56),
.limit_lo = 0x0,
.limit_hi = 0x0,
};
log.debug("GDT entries: {X}, {X}, {X}, {X}, {X}, {X}, {X}, {X}", .{ gdt[0].asU64(), gdt[1].asU64(), gdt[2].asU64(), gdt[3].asU64(), gdt[4].asU64(), gdt[5].asU64(), gdt[6].asU64(), gdt[7].asU64() });
load_gdt(@sizeOf(@TypeOf(gdt)) - 1, @intFromPtr(&gdt));
(I don't know if this is because of how the standard library functions work or whether I'm doing something wrong.) The magic numbers I was given for the GDT entries are 0x0000000000000000 (null), 0x00af9b000000ffff (64-bit code), 0x00af93000000ffff (64-bit data), 0x00affb000000ffff (64-bit user-mode code), and 0x00aff3000000ffff (64-bit user-mode data). I don't believe they're wrong; I'm just unable to reproduce them, and I feel like I've tried pretty much every strategy I can think of. (I tried just dumping them into an array of u64s, but that made computing the descriptors for the TSS annoying, so I settled for the struct approach.)

I can confirm that the values are indeed being passed correctly to my assembly stub: probing the memory at the GDT pointer's base does yield the actual GDT, so that part is working fine. Is there something super obvious that I'm overlooking? (I'm doing this all in QEMU 8.0.0, if that matters.) Here's the output I get:

Code: Select all
[info] [default]Initializing GDT and IDT
[debug] [default]GDT entries: 0, A000000000FF9B, C000000000FF93, A000000000FFFB, C000000000FFF3, 80000900AE200087, FF0000FFFFFF0000, 0
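For reference, here's how I understand the 64-bit kernel code entry is supposed to decompose. This is just a throwaway sketch built with plain shifts (the helper below isn't part of my kernel, only something to compare the packed struct against), and as far as I can tell it does add up to 0x00af9b000000ffff:

Code: Select all
const std = @import("std");

// Throwaway helper (not in my kernel): rebuild a descriptor from its raw
// pieces with plain shifts, to compare against the packed-struct version.
fn makeDescriptor(base: u32, limit: u20, access: u8, flags: u4) u64 {
    const b: u64 = base;
    const l: u64 = limit;
    return (l & 0xFFFF) | // limit 15:0   -> bits 0..15
        ((b & 0xFFFFFF) << 16) | // base 23:0    -> bits 16..39
        (@as(u64, access) << 40) | // access byte  -> bits 40..47
        (((l >> 16) & 0xF) << 48) | // limit 19:16  -> bits 48..51
        (@as(u64, flags) << 52) | // flags nibble -> bits 52..55
        ((b >> 24) << 56); // base 31:24   -> bits 56..63
}

test "64-bit kernel code descriptor" {
    // access 0x9B: present, DPL 0, code/data descriptor, executable, readable, accessed
    // flags  0xA:  granularity and long-mode bits set, D/B clear
    try std.testing.expectEqual(@as(u64, 0x00af9b000000ffff), makeDescriptor(0, 0xFFFFF, 0x9B, 0xA));
}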