Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
51 changes: 41 additions & 10 deletions kernel/arch/aarch64/timer/timer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,19 @@ __PRIVILEGED_CODE static void program_oneshot(uint64_t deadline_ns) {
hwtimer::write_cntv_tval(static_cast<uint32_t>(delta_ticks));
}

// Arms the one-shot virtual timer for the earliest pending event on this
// CPU: the next periodic tick or the front of the sleep queue, whichever
// fires first. Both call sites invoke this with state.lock already held.
__PRIVILEGED_CODE static void program_next_event(timer_cpu_state& state) {
    uint64_t deadline = state.next_tick_ns;
    const bool have_sleeper = !state.sleep_queue.empty();
    if (have_sleeper) {
        uint64_t sleeper_ns = state.sleep_queue.front()->timer_deadline;
        deadline = (sleeper_ns < deadline) ? sleeper_ns : deadline;
    }
    state.programmed_ns = deadline;
    program_oneshot(deadline);
}

/**
* @note Privilege: **required**
*/
Expand Down Expand Up @@ -165,16 +178,7 @@ __PRIVILEGED_CODE bool on_interrupt() {
}
}

uint64_t next_event = state.next_tick_ns;
if (!state.sleep_queue.empty()) {
uint64_t front_deadline = state.sleep_queue.front()->timer_deadline;
if (front_deadline < next_event) {
next_event = front_deadline;
}
}

state.programmed_ns = next_event;
program_oneshot(next_event);
program_next_event(state);

sync::spin_unlock_irqrestore(state.lock, irq);

Expand Down Expand Up @@ -202,4 +206,31 @@ __PRIVILEGED_CODE void schedule_sleep(sched::task* t, uint64_t deadline_ns) {
sync::spin_unlock_irqrestore(state.lock, irq);
}

/**
 * Removes @p t from its CPU's sleep queue, if it is currently queued, and
 * reprograms the timer hardware so the cancelled deadline is dropped.
 *
 * @param t Task whose pending sleep should be cancelled; may be nullptr.
 * @return true if the task was found on a sleep queue and removed,
 *         false if @p t is null or was not queued.
 *
 * @note Privilege: **required**
 */
__PRIVILEGED_CODE bool cancel_sleep(sched::task* t) {
    if (!t) {
        return false;
    }

    // Target the per-CPU timer state of the CPU the task belongs to.
    // NOTE(review): exec.cpu is sampled before the lock is acquired — if a
    // blocked task could migrate between these lines we would lock the wrong
    // CPU's state; confirm migration cannot happen while a sleep is pending.
    uint32_t cpu_id = __atomic_load_n(&t->exec.cpu, __ATOMIC_ACQUIRE);
    timer_cpu_state& state = per_cpu_on(cpu_timer_state, cpu_id);
    sync::irq_state irq = sync::spin_lock_irqsave(state.lock);

    // A queued task has both intrusive-list links set and a nonzero
    // deadline; anything else means it is not (or no longer) on the queue.
    bool linked = t->timer_link.prev != nullptr && t->timer_link.next != nullptr;
    bool queued = t->timer_deadline != 0 && linked;
    if (!queued) {
        sync::spin_unlock_irqrestore(state.lock, irq);
        return false;
    }

    state.sleep_queue.remove(t);
    t->timer_deadline = 0;      // mark as no longer sleeping
    program_next_event(state);  // re-arm for the remaining deadlines

    sync::spin_unlock_irqrestore(state.lock, irq);
    return true;
}

} // namespace timer
51 changes: 41 additions & 10 deletions kernel/arch/x86_64/timer/timer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -113,6 +113,19 @@ __PRIVILEGED_CODE static void program_oneshot(uint64_t deadline_ns) {
mmio::write32(lapic + irq::LAPIC_TIMER_ICR, count);
}

// Programs the LAPIC one-shot timer for the earliest of the next periodic
// tick and the soonest queued sleeper. Both call sites hold state.lock.
__PRIVILEGED_CODE static void program_next_event(timer_cpu_state& state) {
    uint64_t earliest_ns = state.next_tick_ns;
    if (!state.sleep_queue.empty()) {
        uint64_t front_ns = state.sleep_queue.front()->timer_deadline;
        earliest_ns = (front_ns < earliest_ns) ? front_ns : earliest_ns;
    }
    state.programmed_ns = earliest_ns;
    program_oneshot(earliest_ns);
}

/**
* @note Privilege: **required**
*/
Expand Down Expand Up @@ -231,16 +244,7 @@ __PRIVILEGED_CODE bool on_interrupt() {
}
}

uint64_t next_event = state.next_tick_ns;
if (!state.sleep_queue.empty()) {
uint64_t front_deadline = state.sleep_queue.front()->timer_deadline;
if (front_deadline < next_event) {
next_event = front_deadline;
}
}

state.programmed_ns = next_event;
program_oneshot(next_event);
program_next_event(state);

sync::spin_unlock_irqrestore(state.lock, irq);

Expand Down Expand Up @@ -268,4 +272,31 @@ __PRIVILEGED_CODE void schedule_sleep(sched::task* t, uint64_t deadline_ns) {
sync::spin_unlock_irqrestore(state.lock, irq);
}

/**
 * Removes @p t from its CPU's sleep queue, if it is currently queued, and
 * reprograms the timer hardware so the cancelled deadline is dropped.
 *
 * @param t Task whose pending sleep should be cancelled; may be nullptr.
 * @return true if the task was found on a sleep queue and removed,
 *         false if @p t is null or was not queued.
 *
 * @note Privilege: **required**
 */
__PRIVILEGED_CODE bool cancel_sleep(sched::task* t) {
    if (!t) {
        return false;
    }

    // Target the per-CPU timer state of the CPU the task belongs to.
    // NOTE(review): exec.cpu is sampled before the lock is acquired — if a
    // blocked task could migrate between these lines we would lock the wrong
    // CPU's state; confirm migration cannot happen while a sleep is pending.
    uint32_t cpu_id = __atomic_load_n(&t->exec.cpu, __ATOMIC_ACQUIRE);
    timer_cpu_state& state = per_cpu_on(cpu_timer_state, cpu_id);
    sync::irq_state irq = sync::spin_lock_irqsave(state.lock);

    // A queued task has both intrusive-list links set and a nonzero
    // deadline; anything else means it is not (or no longer) on the queue.
    bool linked = t->timer_link.prev != nullptr && t->timer_link.next != nullptr;
    bool queued = t->timer_deadline != 0 && linked;
    if (!queued) {
        sync::spin_unlock_irqrestore(state.lock, irq);
        return false;
    }

    state.sleep_queue.remove(t);
    t->timer_deadline = 0;      // mark as no longer sleeping
    program_next_event(state);  // re-arm for the remaining deadlines

    sync::spin_unlock_irqrestore(state.lock, irq);
    return true;
}

} // namespace timer
3 changes: 3 additions & 0 deletions kernel/common/ring_buffer.cpp
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
#include "common/ring_buffer.h"
#include "sched/sched.h"
#include "mm/heap.h"
#include "common/string.h"

Expand Down Expand Up @@ -77,6 +78,7 @@ __PRIVILEGED_CODE ssize_t ring_buffer_read(ring_buffer* rb, uint8_t* buf, size_t
}
while (readable_bytes(rb) == 0 && !rb->writer_closed) {
irq = sync::wait(rb->read_wq, rb->lock, irq);
sched::terminate_if_requested();
}
}

Expand Down Expand Up @@ -123,6 +125,7 @@ __PRIVILEGED_CODE ssize_t ring_buffer_write(ring_buffer* rb, const uint8_t* buf,
}
while (writable_bytes(rb) == 0 && !rb->reader_closed) {
irq = sync::wait(rb->write_wq, rb->lock, irq);
sched::terminate_if_requested();
}
}

Expand Down
42 changes: 28 additions & 14 deletions kernel/resource/providers/proc_provider.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,8 @@

namespace resource::proc_provider {

constexpr int32_t PROC_FORCED_EXIT_CODE = 137;

__PRIVILEGED_CODE void proc_resource::ref_destroy(proc_resource* self) {
heap::kfree_delete(self);
}
Expand Down Expand Up @@ -37,21 +39,33 @@ __PRIVILEGED_CODE static void proc_close(resource_object* obj) {

sync::irq_state irq = sync::spin_lock_irqsave(pr->lock);

if (pr->child && pr->child->state == sched::TASK_STATE_CREATED) {
auto* child = pr->child;
pr->child = nullptr;
sync::spin_unlock_irqrestore(pr->lock, irq);

if (child->proc_res) {
(void)child->proc_res->release();
child->proc_res = nullptr;
if (!pr->detached && !pr->exited && pr->child) {
if (pr->child->state == sched::TASK_STATE_CREATED) {
auto* child = pr->child;
pr->child = nullptr;
pr->exit_code = PROC_FORCED_EXIT_CODE;
pr->exited = true;
sync::wake_all(pr->wait_queue);
sync::spin_unlock_irqrestore(pr->lock, irq);

if (child->proc_res) {
(void)child->proc_res->release();
child->proc_res = nullptr;
}
destroy_unstarted_task(child);
} else {
sched::task* child = pr->child;
sync::spin_unlock_irqrestore(pr->lock, irq);

(void)sched::request_task_terminate(child, PROC_FORCED_EXIT_CODE);

irq = sync::spin_lock_irqsave(pr->lock);
while (!pr->exited) {
irq = sync::wait(pr->wait_queue, pr->lock, irq);
sched::terminate_if_requested();
}
sync::spin_unlock_irqrestore(pr->lock, irq);
}
destroy_unstarted_task(child);
} else if (pr->child && !pr->exited && !pr->detached) {
uint32_t child_tid = pr->child->tid;
sync::spin_unlock_irqrestore(pr->lock, irq);
log::fatal("proc_close: parent exiting with running attached child tid=%u",
child_tid);
} else {
sync::spin_unlock_irqrestore(pr->lock, irq);
}
Expand Down
98 changes: 98 additions & 0 deletions kernel/sched/sched.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
#include "mm/paging.h"
#include "common/logging.h"
#include "sync/spinlock.h"
#include "sync/wait_queue.h"
#include "smp/smp.h"
#include "hw/cpu.h"
#include "clock/clock.h"
Expand Down Expand Up @@ -62,6 +63,18 @@ static void store_cleanup_stage(task* t, uint32_t stage) {
__atomic_store_n(&t->cleanup_stage, stage, __ATOMIC_RELEASE);
}

// Reads t->termination_state with acquire ordering so that data published
// before the matching release store (see store_termination_state) is
// visible to the caller.
static uint32_t load_termination_state(const task* t) {
    return __atomic_load_n(&t->termination_state, __ATOMIC_ACQUIRE);
}

// Publishes a new termination state with release ordering; pairs with the
// acquire load in load_termination_state().
static void store_termination_state(task* t, uint32_t state) {
    __atomic_store_n(&t->termination_state, state, __ATOMIC_RELEASE);
}

// Reads the exit code published (release store) by request_task_terminate()
// with acquire ordering.
static int32_t load_termination_exit_code(const task* t) {
    return __atomic_load_n(&t->termination_exit_code, __ATOMIC_ACQUIRE);
}

#ifdef DEBUG
[[noreturn]] __PRIVILEGED_CODE static void panic_invalid_privilege_state(
const char* site
Expand Down Expand Up @@ -339,6 +352,65 @@ __PRIVILEGED_CODE void wake(task* t) {
sync::spin_unlock_irqrestore(rq.lock, irq);
}

/**
 * Arms a cooperative termination request against @p t and, if the task is
 * currently blocked on a wait queue or a timer sleep, cancels that block
 * and wakes it so it can reach a terminate_if_requested() check.
 *
 * Only the first caller wins: the CAS from TASK_TERMINATION_NONE to
 * TASK_TERMINATION_ARMING ensures the exit code is published exactly once.
 *
 * @param t         Task to terminate; may be nullptr.
 * @param exit_code Exit code the task will pass to exit() when it honors
 *                  the request.
 * @return true if this call armed the request; false if @p t is null,
 *         already dead, or a request is already pending.
 *
 * @note Privilege: **required**
 */
__PRIVILEGED_CODE bool request_task_terminate(task* t, int exit_code) {
    if (!t) {
        return false;
    }

    // Already fully torn down — nothing to request.
    if (__atomic_load_n(&t->state, __ATOMIC_ACQUIRE) == TASK_STATE_DEAD) {
        return false;
    }

    // Claim the right to arm the request. Losing the CAS means another
    // termination (or the task's own exit) is already in progress.
    uint32_t expected = TASK_TERMINATION_NONE;
    if (!__atomic_compare_exchange_n(&t->termination_state, &expected,
                                     TASK_TERMINATION_ARMING,
                                     false, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED)) {
        return false;
    }

    // Publish the exit code before flipping the state to REQUESTED, so a
    // target that observes REQUESTED also observes the code.
    __atomic_store_n(&t->termination_exit_code, exit_code, __ATOMIC_RELEASE);
    store_termination_state(t, TASK_TERMINATION_REQUESTED);

    // If the target is parked, kick it out of its wait so it can notice the
    // request. NOTE(review): the target publishes blocking_kind before it
    // actually blocks (see sleep_ns); a request landing in that window is
    // only honored once the block ends on its own — confirm that delay is
    // acceptable for all blocking sites.
    uint32_t blocking_kind = __atomic_load_n(&t->blocking_kind, __ATOMIC_ACQUIRE);
    void* blocking_object = __atomic_load_n(&t->blocking_object, __ATOMIC_ACQUIRE);
    if (blocking_kind == TASK_BLOCKING_WAIT_QUEUE && blocking_object) {
        (void)sync::cancel_wait(*reinterpret_cast<sync::wait_queue*>(blocking_object), t);
        wake(t);
    } else if (blocking_kind == TASK_BLOCKING_TIMER) {
        (void)timer::cancel_sleep(t);
        wake(t);
    }

    return true;
}

/**
 * @brief Reports whether the calling task has a pending termination request.
 *
 * @return true only when a current task exists and its termination state is
 *         TASK_TERMINATION_REQUESTED.
 *
 * @note Privilege: **required**
 */
__PRIVILEGED_CODE bool termination_requested() {
    task* me = current();
    if (me == nullptr) {
        return false;
    }
    return load_termination_state(me) == TASK_TERMINATION_REQUESTED;
}

/**
 * @brief Exits the calling task with the requested exit code when a
 *        termination request is pending; otherwise returns normally.
 *
 * Intended to be called at cooperative cancellation points (e.g. after
 * waking from a wait queue).
 *
 * @note Privilege: **required**
 */
__PRIVILEGED_CODE void terminate_if_requested() {
    task* me = current();
    if (me != nullptr &&
        load_termination_state(me) == TASK_TERMINATION_REQUESTED) {
        exit(load_termination_exit_code(me));
    }
}

/**
* @note Privilege: **required**
*/
Expand All @@ -354,9 +426,13 @@ __PRIVILEGED_CODE void sleep_ns(uint64_t ns) {
}

uint64_t deadline = clock::now_ns() + ns;
__atomic_store_n(&self->blocking_kind, TASK_BLOCKING_TIMER, __ATOMIC_RELEASE);
__atomic_store_n(&self->blocking_object, nullptr, __ATOMIC_RELEASE);
self->state = TASK_STATE_BLOCKED;
timer::schedule_sleep(self, deadline);
yield();
__atomic_store_n(&self->blocking_kind, TASK_BLOCKING_NONE, __ATOMIC_RELEASE);
__atomic_store_n(&self->blocking_object, nullptr, __ATOMIC_RELEASE);
}

__PRIVILEGED_CODE void sleep_us(uint64_t us) {
Expand All @@ -370,6 +446,12 @@ __PRIVILEGED_CODE void sleep_ms(uint64_t ms) {
[[noreturn]] void exit(int exit_code) {
RUN_ELEVATED({
sched::task* task = current();
store_termination_state(task, TASK_TERMINATION_EXITING);
__atomic_store_n(&task->termination_exit_code, exit_code, __ATOMIC_RELEASE);
__atomic_store_n(&task->blocking_kind, TASK_BLOCKING_NONE, __ATOMIC_RELEASE);
__atomic_store_n(&task->blocking_object, nullptr, __ATOMIC_RELEASE);

resource::close_all(task);

if (task->proc_res) {
auto* pr = task->proc_res;
Expand Down Expand Up @@ -460,6 +542,10 @@ __PRIVILEGED_CODE task* create_kernel_task(
string::memcpy(t->name, name, string::strnlen(name, TASK_NAME_MAX - 1));
t->name[string::strnlen(name, TASK_NAME_MAX - 1)] = '\0';
t->cleanup_stage = TASK_CLEANUP_STAGE_ACTIVE;
t->termination_state = TASK_TERMINATION_NONE;
t->termination_exit_code = 0;
t->blocking_kind = TASK_BLOCKING_NONE;
t->blocking_object = nullptr;
t->tlb_sync_ticket.armed = 0;
for (uint32_t i = 0; i < MAX_CPUS; i++) {
t->tlb_sync_ticket.cpu_epoch_snapshot[i] = 0;
Expand Down Expand Up @@ -656,6 +742,10 @@ __PRIVILEGED_CODE task* create_user_task(
string::memcpy(t->name, name, string::strnlen(name, TASK_NAME_MAX - 1));
t->name[string::strnlen(name, TASK_NAME_MAX - 1)] = '\0';
t->cleanup_stage = TASK_CLEANUP_STAGE_ACTIVE;
t->termination_state = TASK_TERMINATION_NONE;
t->termination_exit_code = 0;
t->blocking_kind = TASK_BLOCKING_NONE;
t->blocking_object = nullptr;
t->tlb_sync_ticket.armed = 0;
for (uint32_t i = 0; i < MAX_CPUS; i++) {
t->tlb_sync_ticket.cpu_epoch_snapshot[i] = 0;
Expand Down Expand Up @@ -707,6 +797,10 @@ __PRIVILEGED_CODE int32_t init() {
string::memcpy(idle->name, "idle", 4);
idle->name[4] = '\0';
idle->cleanup_stage = TASK_CLEANUP_STAGE_ACTIVE;
idle->termination_state = TASK_TERMINATION_NONE;
idle->termination_exit_code = 0;
idle->blocking_kind = TASK_BLOCKING_NONE;
idle->blocking_object = nullptr;
idle->tlb_sync_ticket.armed = 0;
fpu::init_state(&idle->exec.fpu_ctx);
resource::init_task_handles(idle);
Expand Down Expand Up @@ -769,6 +863,10 @@ __PRIVILEGED_CODE int32_t init_ap(uint32_t cpu_id, uintptr_t task_stack_top,
string::memcpy(idle->name, "idle", 4);
idle->name[4] = '\0';
idle->cleanup_stage = TASK_CLEANUP_STAGE_ACTIVE;
idle->termination_state = TASK_TERMINATION_NONE;
idle->termination_exit_code = 0;
idle->blocking_kind = TASK_BLOCKING_NONE;
idle->blocking_object = nullptr;
idle->tlb_sync_ticket.armed = 0;
fpu::init_state(&idle->exec.fpu_ctx);
resource::init_task_handles(idle);
Expand Down
Loading