FunnyGuy9796 wrote: I am trying to lock memory and have read that implementing a spinlock is a way to do it (not sure if there is a better way).
There are, but you are going to need spinlocks to implement those. I would do it like this: Implement spinlocks (simple flags, the way you have done it already), then implement futexes with those, then implement mutexes, semaphores, conditions, whatever, with futexes.
A futex is a pointer to an integer. It has two basic operations: Wait and wake. Wait gets the futex, the expected value of the futex, and an optional timeout. The idea is to have all waiters in a global list. The function is going to lock the global list spinlock, then verify that the futex still has the expected value, and if so suspend the calling thread (putting the waiter into the list and releasing the spinlock) until woken up or until the timeout expires (if given). The wake function takes the futex and a count of threads to wake up; it takes the list spinlock, then looks through the list of waiters and wakes up to the requested number of them.
So basically something like:
Code: Select all
static spinlock_t futex_list_lock = SPINLOCK_INIT;

/* One entry per blocked thread; entries live on the blocked thread's own
 * stack and are linked into a single global doubly-linked list. */
struct futex_waiter {
	struct futex_waiter *next, *prev;
	phys_addr_t futex_addr; /* physical address to make it possible to use these in shared memory */
	struct task *task;
};

/* Head of the global waiter list, protected by futex_list_lock.
 * Note: every use site refers to "futex_list_head", so that is the name
 * declared here (the original declared an unused "futex_list"). */
static struct futex_waiter *futex_list_head;
/* Timer callback for futex_wait: record that the wait timed out and
 * make the sleeping task runnable again so schedule() returns ETIMEDOUT. */
static void handle_timeout(void *task)
{
	struct task *waiter = task;

	set_task_schedule_result(waiter, ETIMEDOUT);
	set_task_state(waiter, TASK_RUNNABLE);
}
/* Block the calling thread until the futex at addr is woken, a signal
 * arrives, or the optional timeout expires.
 *
 * addr  - user virtual address of the futex word
 * eval  - value the caller expects *addr to hold; if it differs, return
 *         EAGAIN immediately (the classic futex race-avoidance check,
 *         performed under the list lock so a concurrent wake cannot be lost)
 * to    - optional timeout, or NULL to wait forever
 *
 * Returns 0 on wake, EAGAIN on value mismatch, ETIMEDOUT, EINTR, a
 * getuser() fault code, or ENOMEM if the timer could not be registered.
 */
int futex_wait(volatile int *addr, int eval, struct timespec *to)
{
	unsigned long flg = spinlock_irqsave(&futex_list_lock);
	int val;
	struct timer *tmr = 0;
	/* Read the CURRENT value into val.  (Reading into &eval, as the
	 * original did, clobbers the expected value and leaves val
	 * uninitialized, making the comparison below undefined.) */
	int ret = getuser(addr, &val);
	if (ret) goto out_unlock;
	ret = EAGAIN;
	if (val != eval) goto out_unlock;
	if (to) {
		ret = ENOMEM;
		tmr = register_timer(to, handle_timeout, current_task());
		if (!tmr) goto out_unlock;
	}
	/* Mark ourselves sleeping BEFORE dropping the lock so a wake that
	 * sneaks in between unlock and schedule() is not lost. */
	set_task_state(current_task(), TASK_INTERRUPTIBLE);
	struct futex_waiter fw = {.next = futex_list_head, .prev = 0, .futex_addr = v2p(addr), .task = current_task() };
	if (fw.next) fw.next->prev = &fw;
	futex_list_head = &fw;
	spinunlock_irqrestore(flg, &futex_list_lock);
	ret = schedule(); /* ETIMEDOUT if the timer expired, EINTR on signal, 0 on wake. */
	if (tmr && ret != ETIMEDOUT) cancel_timer(tmr);
	/* Retake the lock and unlink ourselves; we may have been woken
	 * without anyone removing us from the list. */
	flg = spinlock_irqsave(&futex_list_lock);
	if (fw.next) fw.next->prev = fw.prev;
	if (fw.prev) fw.prev->next = fw.next;
	if (futex_list_head == &fw) futex_list_head = fw.next;
out_unlock:
	spinunlock_irqrestore(flg, &futex_list_lock);
	return ret;
}
/* Wake up to num threads blocked on the futex at addr.
 *
 * addr - user virtual address of the futex word (matched by physical
 *        address so shared-memory futexes work)
 * num  - maximum number of waiters to wake; non-positive wakes nobody
 *        (the original decremented before testing, so num == 0 would
 *        have woken EVERY waiter on the futex)
 *
 * Always returns 0.
 */
int futex_wake(volatile int *addr, int num)
{
	if (num <= 0)
		return 0;
	unsigned long flg = spinlock_irqsave(&futex_list_lock);
	phys_addr_t futex_addr = v2p(addr);
	for (const struct futex_waiter *i = futex_list_head; i; i = i->next) {
		if (i->futex_addr == futex_addr) {
			set_task_schedule_result(i->task, 0);
			set_task_state(i->task, TASK_RUNNABLE);
			if (!--num) break;
		}
	}
	spinunlock_irqrestore(flg, &futex_list_lock);
	return 0;
}
With those, it is quite easy to implement mutexes (the simple, error-checking, and recursive kinds), semaphores, conditions, etc.
One more thing: As my pseudo-code alludes to, spinlocks ought to disable interrupts — unless you know ahead of time that you will never need the spinlock in both interrupt and non-interrupt contexts. And really, saving a flag load and store does not seem worth this particular error source to me. So I would write this in assembler, namely like this:
Code: Select all
/* spinlock.h */

/* A spinlock is just a flag word: 0 = free, 1 = held.
 * volatile keeps the compiler from caching it; the actual atomicity
 * comes from the xchg in the assembly implementation. */
typedef volatile int spinlock_t;
#define SPINLOCK_INIT 0

unsigned long spinlock_irqsave(spinlock_t *); /* disables interrupts, returns some value that the unlock function can use to restore interrupt state */
void spinunlock_irqrestore(unsigned long, spinlock_t *); /* unlocks the spinlock, restores interrupt state */
Code: Select all
/* spinlock.S — x86-64, System V ABI (arg 1 in %rdi, arg 2 in %rsi,
 * return in %rax). */

/* unsigned long spinlock_irqsave(spinlock_t *lock)
 * Saves RFLAGS, disables interrupts, then spins until the lock is taken.
 * Returns the saved RFLAGS for spinunlock_irqrestore.
 *
 * Uses test-and-test-and-set: the atomic xchg is only attempted when a
 * plain read sees the lock free, so contended spinning does not hammer
 * the cache line with locked writes.  pause runs only on the contended
 * path, not on the common uncontended acquisition.  (xchg with a memory
 * operand is implicitly locked; no explicit lock prefix is needed.) */
.global spinlock_irqsave
.type spinlock_irqsave, @function
spinlock_irqsave:
	pushfq
	cli
	popq %rax              /* %rax = caller's RFLAGS (return value) */
1:
	movl $1, %edx
	xchgl %edx, (%rdi)     /* atomically swap in "held" */
	testl %edx, %edx
	jz 3f                  /* old value was 0: we own the lock */
2:
	pause                  /* spin-wait hint for the sibling thread */
	cmpl $0, (%rdi)        /* read-only spin until it looks free */
	jnz 2b
	jmp 1b                 /* looks free: retry the atomic swap */
3:
	retq
.size spinlock_irqsave, .-spinlock_irqsave

/* void spinunlock_irqrestore(unsigned long flags, spinlock_t *lock)
 * Releases the lock, then restores the saved interrupt state.  The
 * release store must precede popfq so interrupts are only re-enabled
 * after the lock is free; a plain store suffices as a release on x86. */
.global spinunlock_irqrestore
.type spinunlock_irqrestore, @function
spinunlock_irqrestore:
	pushq %rdi             /* saved RFLAGS onto the stack for popfq */
	movl $0, (%rsi)        /* release the lock */
	popfq                  /* restore caller's interrupt state */
	retq
.size spinunlock_irqrestore, .-spinunlock_irqrestore