use super::{TaskContext, TaskControlBlock, ProcessControlBlock};
use alloc::sync::Arc;
use lazy_static::*;
use super::{fetch_task, TaskStatus};
use super::__switch;
use crate::trap::TrapContext;
use crate::sync::UPSafeCell;

/// Per-core state: the task currently running on this core and the context
/// of the idle control flow that drives scheduling.
pub struct Processor {
    current: Option<Arc<TaskControlBlock>>,
    idle_task_cx: TaskContext,
}

impl Processor {
    pub fn new() -> Self {
        Self {
            current: None,
            idle_task_cx: TaskContext::zero_init(),
        }
    }
    fn get_idle_task_cx_ptr(&mut self) -> *mut TaskContext {
        &mut self.idle_task_cx as *mut _
    }
    /// Take the current task out of the processor, leaving `None` behind.
    pub fn take_current(&mut self) -> Option<Arc<TaskControlBlock>> {
        self.current.take()
    }
    /// Clone a reference to the current task without removing it.
    pub fn current(&self) -> Option<Arc<TaskControlBlock>> {
        self.current.as_ref().map(Arc::clone)
    }
}

lazy_static! {
    pub static ref PROCESSOR: UPSafeCell<Processor> =
        unsafe { UPSafeCell::new(Processor::new()) };
}

/// The idle control flow: repeatedly fetch a ready task from the task manager
/// and switch to it; control comes back here whenever a task calls `schedule`.
pub fn run_tasks() {
    loop {
        let mut processor = PROCESSOR.exclusive_access();
        if let Some(task) = fetch_task() {
            let idle_task_cx_ptr = processor.get_idle_task_cx_ptr();
            // access coming task TCB exclusively
            let mut task_inner = task.inner_exclusive_access();
            let next_task_cx_ptr = &task_inner.task_cx as *const TaskContext;
            task_inner.task_status = TaskStatus::Running;
            drop(task_inner);
            // release coming task TCB manually
            processor.current = Some(task);
            // release processor manually
            drop(processor);
            unsafe {
                __switch(idle_task_cx_ptr, next_task_cx_ptr);
            }
        } else {
            println!("no tasks available in run_tasks");
        }
    }
}

pub fn take_current_task() -> Option<Arc<TaskControlBlock>> {
    PROCESSOR.exclusive_access().take_current()
}

pub fn current_task() -> Option<Arc<TaskControlBlock>> {
    PROCESSOR.exclusive_access().current()
}

pub fn current_process() -> Arc<ProcessControlBlock> {
    current_task().unwrap().process.upgrade().unwrap()
}

pub fn current_user_token() -> usize {
    let task = current_task().unwrap();
    task.get_user_token()
}

pub fn current_trap_cx() -> &'static mut TrapContext {
    current_task()
        .unwrap()
        .inner_exclusive_access()
        .get_trap_cx()
}

pub fn current_trap_cx_user_va() -> usize {
    current_task()
        .unwrap()
        .inner_exclusive_access()
        .res
        .trap_cx_user_va()
}

pub fn current_kstack_top() -> usize {
    current_task()
        .unwrap()
        .inner_exclusive_access()
        .res
        .kstack_top()
}

/// Give up the CPU: save the outgoing task's context at `switched_task_cx_ptr`
/// and switch back to this core's idle control flow in `run_tasks`.
pub fn schedule(switched_task_cx_ptr: *mut TaskContext) {
    let mut processor = PROCESSOR.exclusive_access();
    let idle_task_cx_ptr = processor.get_idle_task_cx_ptr();
    drop(processor);
    unsafe {
        __switch(switched_task_cx_ptr, idle_task_cx_ptr);
    }
}
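
// Usage sketch (an assumption, not part of this module): a caller that wants
// to yield the CPU would typically take the current task off the processor,
// mark it Ready, hand it back to the task manager, and then call `schedule`
// with a pointer to its saved TaskContext so control returns to the idle loop
// in `run_tasks`. The names `suspend_current_and_run_next` and `add_task`
// below are hypothetical helpers, shown only for illustration:
//
//     pub fn suspend_current_and_run_next() {
//         let task = take_current_task().unwrap();
//         let mut task_inner = task.inner_exclusive_access();
//         let task_cx_ptr = &mut task_inner.task_cx as *mut TaskContext;
//         task_inner.task_status = TaskStatus::Ready;
//         drop(task_inner);
//         // hypothetical helper that pushes the task back into the ready queue
//         add_task(task);
//         // switch to the idle control flow; this call returns only when
//         // `run_tasks` later picks this task up and switches back to it
//         schedule(task_cx_ptr);
//     }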