Implement many process syscalls.

Yifan Wu 2020-12-10 11:57:26 +08:00
parent 81bef97f09
commit 3642f9c56d
20 changed files with 383 additions and 88 deletions

os/src/task/manager.rs

@@ -5,7 +5,7 @@ use spin::Mutex;
 use lazy_static::*;

 pub struct TaskManager {
-    ready_queue: VecDeque<Arc<Mutex<TaskControlBlock>>>,
+    ready_queue: VecDeque<Arc<TaskControlBlock>>,
 }

 /// A simple FIFO scheduler.
@@ -13,10 +13,10 @@ impl TaskManager {
     pub fn new() -> Self {
         Self { ready_queue: VecDeque::new(), }
     }
-    pub fn add(&mut self, task: Arc<Mutex<TaskControlBlock>>) {
+    pub fn add(&mut self, task: Arc<TaskControlBlock>) {
         self.ready_queue.push_back(task);
     }
-    pub fn fetch(&mut self) -> Option<Arc<Mutex<TaskControlBlock>>> {
+    pub fn fetch(&mut self) -> Option<Arc<TaskControlBlock>> {
         self.ready_queue.pop_front()
     }
 }
@@ -25,10 +25,10 @@ lazy_static! {
     pub static ref TASK_MANAGER: Mutex<TaskManager> = Mutex::new(TaskManager::new());
 }

-pub fn add_task(task: Arc<Mutex<TaskControlBlock>>) {
+pub fn add_task(task: Arc<TaskControlBlock>) {
     TASK_MANAGER.lock().add(task);
 }

-pub fn fetch_task() -> Option<Arc<Mutex<TaskControlBlock>>> {
+pub fn fetch_task() -> Option<Arc<TaskControlBlock>> {
     TASK_MANAGER.lock().fetch()
 }
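
The signature change ripples through every caller: the scheduler now hands out `Arc<TaskControlBlock>` and callers lock only the mutable part of the control block. A minimal before/after sketch of the calling convention (`acquire_inner_lock` is introduced in os/src/task/task.rs below):

// before this commit: the whole control block sits behind one lock
let task: Arc<Mutex<TaskControlBlock>> = fetch_task().unwrap();
task.lock().task_status = TaskStatus::Ready;

// after: the block is shared immutably; only the inner state is locked
let task: Arc<TaskControlBlock> = fetch_task().unwrap();
task.acquire_inner_lock().task_status = TaskStatus::Ready;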

os/src/task/mod.rs

@@ -5,17 +5,11 @@ mod manager;
 mod processor;
 mod pid;

-use crate::loader::{get_num_app, get_app_data};
-use crate::trap::TrapContext;
-use core::cell::RefCell;
-use lazy_static::*;
+use crate::loader::{get_app_data_by_name};
 use switch::__switch;
 use task::{TaskControlBlock, TaskStatus};
-use alloc::vec::Vec;
 use alloc::sync::Arc;
-use spin::Mutex;
 use manager::fetch_task;
-use pid::{PidHandle, pid_alloc, KernelStack};

 pub use context::TaskContext;
 pub use processor::{
@@ -27,13 +21,21 @@ pub use processor::{
     schedule,
 };
 pub use manager::add_task;
+pub use pid::{PidHandle, pid_alloc, KernelStack};

 pub fn suspend_current_and_run_next() {
     // There must be an application running.
-    let task = current_task().unwrap();
-    let task_cx_ptr = task.lock().get_task_cx_ptr2();
-    // Change status to Ready.
-    task.lock().task_status = TaskStatus::Ready;
+    let task = take_current_task().unwrap();
+    // ---- temporarily hold current PCB lock
+    let task_cx_ptr = task.acquire_inner_lock().get_task_cx_ptr2();
+    // ---- release current PCB lock
+    // ++++ temporarily hold current PCB lock
+    // Change status to Ready
+    task.acquire_inner_lock().task_status = TaskStatus::Ready;
+    // ++++ release current PCB lock
     // push back to ready queue.
     add_task(task);
     // jump to scheduling cycle
@@ -41,10 +43,37 @@ pub fn suspend_current_and_run_next() {
 }

 pub fn exit_current_and_run_next() {
-    // The resource recycle mechanism needs child processes. Now we just panic!
-    panic!("An application exited!");
+    // take from Processor
+    let task = take_current_task().unwrap();
+    // **** hold current PCB lock
+    let mut inner = task.acquire_inner_lock();
+    // Change status to Zombie
+    inner.task_status = TaskStatus::Zombie;
+    // move any child to its parent
+    // ++++++ hold parent PCB lock here
+    {
+        let parent = inner.parent.as_ref().unwrap().upgrade().unwrap();
+        let mut parent_inner = parent.acquire_inner_lock();
+        for child in inner.children.iter() {
+            parent_inner.children.push(child.clone());
+        }
+    }
+    // ++++++ release parent PCB lock here
+    inner.children.clear();
+    // deallocate user space
+    inner.memory_set.clear();
+    drop(inner);
+    // **** release current PCB lock
+    // drop task manually to maintain rc correctly
+    drop(task);
+    // we do not have to save task context
+    let _unused: usize = 0;
+    schedule(&_unused as *const _);
 }

-pub fn add_application(elf_data: &[u8], app_id: usize) {
-    add_task(Arc::new(Mutex::new(TaskControlBlock::new(elf_data, app_id))));
+pub fn add_initproc() {
+    let data = get_app_data_by_name("initproc").unwrap();
+    add_task(Arc::new(TaskControlBlock::new(data)));
 }
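
These two functions back the yield/exit syscalls named in the commit title. A hedged sketch of how the syscall layer (os/src/syscall/process.rs, not part of this excerpt) would drive them:

// Sketch only; the real handlers live outside this excerpt.
pub fn sys_yield() -> isize {
    suspend_current_and_run_next();
    0
}

pub fn sys_exit(exit_code: i32) -> ! {
    println!("[kernel] Application exited with code {}", exit_code);
    // note: this version does not yet record exit_code in the zombie TCB;
    // the exit_code field added below stays 0 until a later change wires it up
    exit_current_and_run_next();
    panic!("Unreachable in sys_exit!");
}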

os/src/task/pid.rs

@@ -46,6 +46,7 @@ pub struct PidHandle(pub usize);
 impl Drop for PidHandle {
     fn drop(&mut self) {
+        //println!("drop pid {}", self.0);
         PID_ALLOCATOR.lock().dealloc(self.0);
     }
 }
@@ -82,7 +83,7 @@ impl KernelStack {
     }
     pub fn push_on_top<T>(&self, value: T) -> *mut T where
         T: Sized, {
-        let (_, kernel_stack_top) = kernel_stack_position(self.pid);
+        let kernel_stack_top = self.get_top();
         let ptr_mut = (kernel_stack_top - core::mem::size_of::<T>()) as *mut T;
         unsafe { *ptr_mut = value; }
         ptr_mut
os/src/task/processor.rs

@@ -2,7 +2,7 @@ use super::TaskControlBlock;
 use alloc::sync::Arc;
 use spin::Mutex;
 use lazy_static::*;
-use super::{add_task, fetch_task};
+use super::{fetch_task, TaskStatus};
 use super::__switch;
 use crate::trap::TrapContext;
@@ -13,7 +13,7 @@ pub struct Processor {
 unsafe impl Sync for Processor {}

 struct ProcessorInner {
-    current: Option<Arc<Mutex<TaskControlBlock>>>,
+    current: Option<Arc<TaskControlBlock>>,
     idle_task_cx_ptr: usize,
 }
@@ -31,13 +31,13 @@ impl Processor {
         &inner.idle_task_cx_ptr as *const usize
     }
     pub fn run(&self) {
-        //println!("into Processor::run");
         loop {
             if let Some(task) = fetch_task() {
-                //println!("found task!");
                 let idle_task_cx_ptr = self.get_idle_task_cx_ptr2();
-                let next_task_cx_ptr = task.lock().get_task_cx_ptr2();
-                //println!("next_task_cx_ptr={:p}", next_task_cx_ptr);
+                // acquire
+                let next_task_cx_ptr = task.acquire_inner_lock().get_task_cx_ptr2();
+                task.acquire_inner_lock().task_status = TaskStatus::Running;
+                // release
                 self.inner.lock().current = Some(task);
                 unsafe {
                     __switch(
@@ -48,10 +48,10 @@ impl Processor {
             }
         }
     }
-    pub fn take_current(&self) -> Option<Arc<Mutex<TaskControlBlock>>> {
+    pub fn take_current(&self) -> Option<Arc<TaskControlBlock>> {
         self.inner.lock().current.take()
     }
-    pub fn current(&self) -> Option<Arc<Mutex<TaskControlBlock>>> {
+    pub fn current(&self) -> Option<Arc<TaskControlBlock>> {
         self.inner.lock().current.as_ref().map(|task| task.clone())
     }
 }
@@ -64,25 +64,22 @@ pub fn run_tasks() {
     PROCESSOR.run();
 }

-pub fn take_current_task() -> Option<Arc<Mutex<TaskControlBlock>>> {
+pub fn take_current_task() -> Option<Arc<TaskControlBlock>> {
     PROCESSOR.take_current()
 }

-pub fn current_task() -> Option<Arc<Mutex<TaskControlBlock>>> {
-    //println!("into current_task!");
+pub fn current_task() -> Option<Arc<TaskControlBlock>> {
     PROCESSOR.current()
 }

 pub fn current_user_token() -> usize {
-    //println!("into current_user_token!");
     let task = current_task().unwrap();
-    //println!("Got task in current_user_token!");
-    let token = task.lock().get_user_token();
+    let token = task.acquire_inner_lock().get_user_token();
     token
 }

 pub fn current_trap_cx() -> &'static mut TrapContext {
-    current_task().unwrap().as_ref().lock().get_trap_cx()
+    current_task().unwrap().acquire_inner_lock().get_trap_cx()
 }

 pub fn schedule(switched_task_cx_ptr2: *const usize) {
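
The last hunk cuts off at `schedule`'s signature, which this commit leaves unchanged. For context, its job is to switch from the departing task's saved context back to the per-processor idle control flow inside `Processor::run`, which then fetches the next ready task; a sketch consistent with the surrounding code:

pub fn schedule(switched_task_cx_ptr2: *const usize) {
    let idle_task_cx_ptr2 = PROCESSOR.get_idle_task_cx_ptr2();
    unsafe {
        // save the outgoing context, resume the idle loop in run()
        __switch(switched_task_cx_ptr2, idle_task_cx_ptr2);
    }
}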

os/src/task/task.rs

@@ -1,22 +1,32 @@
-use crate::mm::{MemorySet, MapPermission, PhysPageNum, KERNEL_SPACE, VirtAddr};
+use crate::mm::{MemorySet, PhysPageNum, KERNEL_SPACE, VirtAddr};
 use crate::trap::{TrapContext, trap_handler};
-use crate::config::{TRAP_CONTEXT, kernel_stack_position};
+use crate::config::{TRAP_CONTEXT};
 use super::TaskContext;
+use super::{PidHandle, pid_alloc, KernelStack};
+use alloc::sync::{Weak, Arc};
+use alloc::vec::Vec;
+use spin::{Mutex, MutexGuard};

 pub struct TaskControlBlock {
-    pub trap_cx_ppn: PhysPageNum,
-    pub base_size: usize,
+    // immutable
     pub pid: PidHandle,
     pub kernel_stack: KernelStack,
+    // mutable
+    inner: Mutex<TaskControlBlockInner>,
+}
+
+pub struct TaskControlBlockInner {
+    pub trap_cx_ppn: PhysPageNum,
+    pub base_size: usize,
     pub task_cx_ptr: usize,
     pub task_status: TaskStatus,
     pub memory_set: MemorySet,
+    pub parent: Option<Weak<TaskControlBlock>>,
+    pub children: Vec<Arc<TaskControlBlock>>,
+    pub exit_code: i32,
 }

-impl TaskControlBlock {
+impl TaskControlBlockInner {
     pub fn get_task_cx_ptr2(&self) -> *const usize {
         &self.task_cx_ptr as *const usize
     }
@@ -26,7 +36,19 @@ impl TaskControlBlock {
     pub fn get_user_token(&self) -> usize {
         self.memory_set.token()
     }
-    pub fn new(elf_data: &[u8], app_id: usize) -> Self {
+    fn get_status(&self) -> TaskStatus {
+        self.task_status
+    }
+    pub fn is_zombie(&self) -> bool {
+        self.get_status() == TaskStatus::Zombie
+    }
+}
+
+impl TaskControlBlock {
+    pub fn acquire_inner_lock(&self) -> MutexGuard<TaskControlBlockInner> {
+        self.inner.lock()
+    }
+    pub fn new(elf_data: &[u8]) -> Self {
         // memory_set with elf program headers/trampoline/trap context/user stack
         let (memory_set, user_sp, entry_point) = MemorySet::from_elf(elf_data);
         let trap_cx_ppn = memory_set
@@ -41,16 +63,23 @@ impl TaskControlBlock {
         // push a task context which goes to trap_return to the top of kernel stack
         let task_cx_ptr = kernel_stack.push_on_top(TaskContext::goto_trap_return());
         let task_control_block = Self {
-            trap_cx_ppn,
-            base_size: user_sp,
             pid: pid_handle,
             kernel_stack,
-            task_cx_ptr: task_cx_ptr as usize,
-            task_status,
-            memory_set,
+            inner: Mutex::new(TaskControlBlockInner {
+                trap_cx_ppn,
+                base_size: user_sp,
+                task_cx_ptr: task_cx_ptr as usize,
+                task_status,
+                memory_set,
+                parent: None,
+                children: Vec::new(),
+                exit_code: 0,
+            }),
         };
         // prepare TrapContext in user space
-        let trap_cx = task_control_block.get_trap_cx();
+        // ---- acquire child PCB lock
+        let trap_cx = task_control_block.acquire_inner_lock().get_trap_cx();
+        // ---- release child PCB lock
         *trap_cx = TrapContext::app_init_context(
             entry_point,
             user_sp,
@@ -60,12 +89,86 @@ impl TaskControlBlock {
         );
         task_control_block
     }
+    pub fn exec(&self, elf_data: &[u8]) {
+        // memory_set with elf program headers/trampoline/trap context/user stack
+        let (memory_set, user_sp, entry_point) = MemorySet::from_elf(elf_data);
+        let trap_cx_ppn = memory_set
+            .translate(VirtAddr::from(TRAP_CONTEXT).into())
+            .unwrap()
+            .ppn();
+        // **** hold current PCB lock
+        let mut inner = self.inner.lock();
+        // substitute memory_set
+        inner.memory_set = memory_set;
+        // update trap_cx ppn
+        inner.trap_cx_ppn = trap_cx_ppn;
+        drop(inner);
+        // **** release current PCB lock manually
+        // initialize trap_cx
+        // **** acquire current PCB lock
+        let trap_cx = self.acquire_inner_lock().get_trap_cx();
+        // **** release current PCB lock
+        *trap_cx = TrapContext::app_init_context(
+            entry_point,
+            user_sp,
+            KERNEL_SPACE.lock().token(),
+            self.kernel_stack.get_top(),
+            trap_handler as usize,
+        );
+    }
+    pub fn fork(self: &Arc<TaskControlBlock>) -> Arc<TaskControlBlock> {
+        // ---- hold parent PCB lock
+        let mut parent_inner = self.inner.lock();
+        // copy user space(include trap context)
+        let memory_set = MemorySet::from_existed_user(
+            &parent_inner.memory_set
+        );
+        let trap_cx_ppn = memory_set
+            .translate(VirtAddr::from(TRAP_CONTEXT).into())
+            .unwrap()
+            .ppn();
+        let task_status = TaskStatus::Ready;
+        // alloc a pid and a kernel stack in kernel space
+        let pid_handle = pid_alloc();
+        let kernel_stack = KernelStack::new(&pid_handle);
+        let kernel_stack_top = kernel_stack.get_top();
+        // push a goto_trap_return task_cx on the top of kernel stack
+        let task_cx_ptr = kernel_stack.push_on_top(TaskContext::goto_trap_return());
+        let task_control_block = Arc::new(TaskControlBlock {
+            pid: pid_handle,
+            kernel_stack,
+            inner: Mutex::new(TaskControlBlockInner {
+                trap_cx_ppn,
+                base_size: parent_inner.base_size,
+                task_cx_ptr: task_cx_ptr as usize,
+                task_status,
+                memory_set,
+                parent: Some(Arc::downgrade(self)),
+                children: Vec::new(),
+                exit_code: 0,
+            }),
+        });
+        // add child
+        parent_inner.children.push(task_control_block.clone());
+        // modify kernel_sp in trap_cx
+        // **** acquire child PCB lock
+        let trap_cx = task_control_block.acquire_inner_lock().get_trap_cx();
+        // **** release child PCB lock
+        trap_cx.kernel_sp = kernel_stack_top;
+        // return
+        task_control_block
+        // ---- release parent PCB lock
+    }
+    pub fn getpid(&self) -> usize {
+        self.pid.0
+    }
 }

 #[derive(Copy, Clone, PartialEq)]
 pub enum TaskStatus {
     Ready,
     Running,
-    Exited,
+    Zombie,
 }
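
Taken together, `new`/`exec`/`fork`/`getpid` are the kernel-side machinery for the process syscalls in the commit title. A hedged sketch of the handlers (they live in os/src/syscall/process.rs, outside this excerpt; `translated_str` is assumed here to copy a NUL-terminated string out of the current user address space):

pub fn sys_getpid() -> isize {
    current_task().unwrap().getpid() as isize
}

pub fn sys_fork() -> isize {
    let current_task = current_task().unwrap();
    let new_task = current_task.fork();
    let new_pid = new_task.pid.0;
    // the child returns 0: x[10] is a0, the RISC-V return-value register
    let trap_cx = new_task.acquire_inner_lock().get_trap_cx();
    trap_cx.x[10] = 0;
    // schedule the child; the parent returns the child's pid
    add_task(new_task);
    new_pid as isize
}

pub fn sys_exec(path: *const u8) -> isize {
    let token = current_user_token();
    let path = translated_str(token, path);  // assumed helper
    if let Some(data) = get_app_data_by_name(path.as_str()) {
        let task = current_task().unwrap();
        task.exec(data);
        0
    } else {
        -1
    }
}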