Multitasking hell
Posted: Thu Aug 24, 2006 12:06 pm
Hi,
My kernel is now in the stage of building in multitasking. Well I thought that's not hard to do I'll just port the scheduling code of my old OS and build new functions for creating processes/thread etc. Well I've cheked all over my kernel, debugged every file, but still didn't get an answer on why my kernel get's a general protection file every time I try to do a task switch.
In the current situation I have only one running thread, which is the current kernel thread. I've set up a TSS for my thread and loaded it with ltr, all goes fine here. I set up my execution queue which is ok as well. This is done following this code which is called from the kernel's main() function.
This looks OK right?? Now we go to the CreateProcess() function which initializes a process structure put it in a list, create a main thread, fill it with stuff. Now I only implemented stuff for dpl0.
Some few support functions for linked lists:
Now here comes my scheduler, it faults on the point of jumping to the other thread:
tasks.c
So I cheked if the value pushed was ok and it is, the gdt entry excists and is valid. Is my TSS corrupted? Have been debugging for 3 days now
Thanks in advance!
My kernel is now in the stage of building in multitasking. Well I thought that's not hard to do I'll just port the scheduling code of my old OS and build new functions for creating processes/thread etc. Well I've cheked all over my kernel, debugged every file, but still didn't get an answer on why my kernel get's a general protection file every time I try to do a task switch.
In the current situation I have only one running thread, which is the current kernel thread. I've set up a TSS for my thread and loaded it with ltr, all goes fine here. I set up my execution queue which is ok as well. This is done following this code which is called from the kernel's main() function.
Code: Select all
extern unsigned long* page_directory;
/* tss structure */
typedef struct tss_s
{
unsigned short previous, empty1;
unsigned long esp0;
unsigned short ss0, empty2;
unsigned long esp1;
unsigned short ss1, empty3;
unsigned long esp2;
unsigned short ss2, empty4;
unsigned long cr3;
unsigned long eip;
unsigned long eflags;
unsigned long eax;
unsigned long ecx;
unsigned long edx;
unsigned long ebx;
unsigned long esp;
unsigned long ebp;
unsigned long esi;
unsigned long edi;
unsigned short es, empty5;
unsigned short cs, empty6;
unsigned short ss, empty7;
unsigned short ds, empty8;
unsigned short fs, empty9;
unsigned short gs, empty10;
unsigned short ldt, empty11;
unsigned short trapflag;
unsigned short iomapbase;
} tss_t;
struct _thread;
struct _process;
typedef struct _thread
{
id_t tid; // identifier of the thread
unsigned long gdt; // entry of tss within gdt
tss_t* tss; // this should be clear
struct _process* parent;
struct _thread* next; // support for the scheduler
struct _thread* prev; // the same
} thread;
typedef struct _process
{
char name[16]; // this should be clear
thread* core; // This is the first thread of a process that can create other threads
// all properties of other thread of process are inherited by this one.
dpl_t dpl; // dpl level
id_t pid; // the identifier of the process
id_t uid; // which user is running this process
id_t gid; // which group is owner of this process
size_t number; // number of threads
struct _process* next; // for linked list
struct _process* prev; // for linked list
} process;
typedef struct ___process_head_node
{
/* points to next node */
process* next;
/* holds number of nodes in list */
size_t number;
} __process_head_node;
typedef struct ___schedule_list_head_node
{
/* points to next node */
thread* next;
size_t number;
} __schedule_list_head_node;
__process_head_node* process_head_node = 0;
__schedule_list_head_node* schedule_list_head_node = 0;
/* current scheduler node */
thread* node = 0;
/* proto */
void IdleTask();
/* initialize nodes */
void tasks_install()
{
/* allocate head node */
process_head_node = malloc(sizeof(__process_head_node));
schedule_list_head_node = malloc(sizeof(__schedule_list_head_node));
/* zero structure */
memset(process_head_node, 0, sizeof(__process_head_node));
memset(schedule_list_head_node, 0, sizeof(__schedule_list_head_node));
CreateProcess("idle", 0, 0, (addr_t)&IdleTask);
/* set first execution task */
node = process_head_node->next->core;
/* load task register */
__asm__("ltr %%ax"::"a" (schedule_list_head_node->next->gdt));
}
Code: Select all
id_t CreateProcess(char* name, id_t uid, dpl_t dpl, addr_t eip)
{
/* allocate */
process* p = malloc(sizeof(process));
/* clear node */
memset(p, 0, sizeof(process));
/* copy name */
strcpy(p->name, name);
/* fill general stuff */
p->dpl = dpl;
p->gid = 0; // todo
p->uid = uid;
p->pid = 0; // todo
p->number++;
/* create child thread */
/* allocate memory */
p->core = malloc(sizeof(thread));
/* clear node */
memset(p->core, 0, sizeof(thread));
/* set common stuff */
p->core->parent = p;
p->core->next = 0;
p->core->prev = 0;
p->core->tid = 0; // todo
p->core->gdt = 0;
p->core->tss = (tss_t*)AllocPage(); /* allocate in physical memory */
/* clear page */
ClearPage((addr_t*)p->core->tss);
/* fill TSS with appropriate values */
if(p->dpl == 0)
{
p->core->tss->eflags = EFLAG_BASE | EFLAG_INTERRUPT;
p->core->tss->eax = 0;
p->core->tss->ebx = 0;
p->core->tss->ecx = 0;
p->core->tss->edx = 0;
p->core->tss->esi = 0;
p->core->tss->edi = 0;
p->core->tss->ebp = 0;
p->core->tss->trapflag = 0;
p->core->tss->iomapbase = 0;
p->core->tss->cs = KERNEL_CS;
p->core->tss->es = KERNEL_DS;
p->core->tss->ds = KERNEL_DS;
p->core->tss->fs = KERNEL_DS;
p->core->tss->gs = KERNEL_DS;
p->core->tss->ss = KERNEL_DS;
p->core->tss->ss0 = KERNEL_DS;
p->core->tss->ss1 = KERNEL_DS;
p->core->tss->ss2 = KERNEL_DS;
p->core->tss->cr3 = (addr_t)page_directory;
//p->core->tss->esp = (addr_t)&sys_stack;
p->core->tss->esp = AllocPage() + PAGE_SIZE;
p->core->tss->esp0 = 0;
p->core->tss->esp1 = 0;
p->core->tss->esp2 = 0;
p->core->tss->ldt = 0;
p->core->tss->eip = (addr_t)eip;
/* get gdt entry */
p->core->gdt = 8 * GDTGetFreeSegment((addr_t)&p->core->tss, sizeof(tss_t), GDT_PRESENT | GDT_DPL0 | GDT_SYS | GDT_TSS32, GDT_AVAIL);
}
/* todo user tasks */
/* insert in list */
ProcessInsertNode(p);
/* insert core thread into execution list */
SchedulerInsertNode(p->core);
return p->pid;
}
Code: Select all
inline void ProcessInsertNode(process* p)
{
if(process_head_node->next)
{
/* modify next node */
process_head_node->next->prev = p;
}
else
{
/* nothing after this node */
p->next = 0;
}
/* insert node */
process_head_node->next = p;
/* modify node */
p->prev = 0;
/*increment node count */
process_head_node->number++;
}
inline void ProcessRemoveNode(process* p)
{
/* chek if not end of list */
if(p->next)
{
/* modify next and prev nodes */
p->next->prev = p->prev;
p->prev->next = p->next;
}
else
{
/* set previous node's next to zero (make it end of list) */
p->prev->next = 0;
}
/* common stuff */
process_head_node->number--;
}
inline void SchedulerInsertNode(thread* p)
{
if(schedule_list_head_node->next)
{
/* modify next node */
schedule_list_head_node->next->prev = p;
}
else
{
/* nothing after this node */
p->next = 0;
}
/* insert node */
schedule_list_head_node->next = p;
/* modify node */
p->prev = 0;
/*increment node count */
schedule_list_head_node->number++;
}
inline void SchedulerRemoveNode(thread* p)
{
/* chek if not end of list */
if(p->next)
{
/* modify next and prev nodes */
p->next->prev = p->prev;
p->prev->next = p->next;
}
else
{
/* set previous node's next to zero (make it end of list) */
p->prev->next = 0;
}
/* common stuff */
schedule_list_head_node->number--;
}
inline void SchedulerGetNextNode()
{
if(node->next)
{
node = node->next;
}
else
{
node = schedule_list_head_node->next;
}
}
Code: Select all
extern void switch_to(unsigned long);
void schedule(struct regs* r)
{
/* get next thread */
SchedulerGetNextNode();
// load next TSS
switch_to(node->gdt);
}
asm:
.globl _switch_to
_switch_to:
ljmp *(%esp) <--------- faults here
ret
So I cheked if the value pushed was ok and it is, the gdt entry excists and is valid. Is my TSS corrupted? Have been debugging for 3 days now
Thanks in advance!