Debugging sys_exec :(

This commit is contained in:
Yifan Wu 2021-09-30 10:09:21 -07:00
parent 6d88ef9d99
commit ad0a7bcaa1
14 changed files with 371 additions and 182 deletions

View file

@ -8,7 +8,7 @@ pub const PAGE_SIZE: usize = 0x1000;
pub const PAGE_SIZE_BITS: usize = 0xc; pub const PAGE_SIZE_BITS: usize = 0xc;
pub const TRAMPOLINE: usize = usize::MAX - PAGE_SIZE + 1; pub const TRAMPOLINE: usize = usize::MAX - PAGE_SIZE + 1;
pub const TRAP_CONTEXT: usize = TRAMPOLINE - PAGE_SIZE; pub const TRAP_CONTEXT_BASE: usize = TRAMPOLINE - PAGE_SIZE;
#[cfg(feature = "board_k210")] #[cfg(feature = "board_k210")]
pub const CLOCK_FREQ: usize = 403000000 / 62; pub const CLOCK_FREQ: usize = 403000000 / 62;
@ -39,4 +39,4 @@ pub const MMIO: &[(usize, usize)] = &[
(0x5200_0000, 0x1000), /* SPI0 */ (0x5200_0000, 0x1000), /* SPI0 */
(0x5300_0000, 0x1000), /* SPI1 */ (0x5300_0000, 0x1000), /* SPI1 */
(0x5400_0000, 0x1000), /* SPI2 */ (0x5400_0000, 0x1000), /* SPI2 */
]; ];

View file

@ -1,5 +1,6 @@
use core::panic::PanicInfo; use core::panic::PanicInfo;
use crate::sbi::shutdown; use crate::sbi::shutdown;
use crate::task::current_kstack_top;
#[panic_handler] #[panic_handler]
fn panic(info: &PanicInfo) -> ! { fn panic(info: &PanicInfo) -> ! {
@ -8,5 +9,19 @@ fn panic(info: &PanicInfo) -> ! {
} else { } else {
println!("[kernel] Panicked: {}", info.message().unwrap()); println!("[kernel] Panicked: {}", info.message().unwrap());
} }
unsafe { backtrace(); }
shutdown() shutdown()
} }
unsafe fn backtrace() {
let mut fp: usize;
let stop = current_kstack_top();
asm!("mv {}, s0", out(reg) fp);
println!("---START BACKTRACE---");
for i in 0..10 {
if fp == stop { break; }
println!("#{}:ra={:#x}", i, *((fp-8) as *const usize));
fp = *((fp-16) as *const usize);
}
println!("---END BACKTRACE---");
}

View file

@ -52,4 +52,4 @@ pub fn rust_main() -> ! {
task::add_initproc(); task::add_initproc();
task::run_tasks(); task::run_tasks();
panic!("Unreachable in rust_main!"); panic!("Unreachable in rust_main!");
} }

View file

@ -12,8 +12,6 @@ use crate::config::{
MEMORY_END, MEMORY_END,
PAGE_SIZE, PAGE_SIZE,
TRAMPOLINE, TRAMPOLINE,
TRAP_CONTEXT,
USER_STACK_SIZE,
MMIO, MMIO,
}; };
@ -142,8 +140,8 @@ impl MemorySet {
} }
memory_set memory_set
} }
/// Include sections in elf and trampoline and TrapContext and user stack, /// Include sections in elf and trampoline,
/// also returns user_sp and entry point. /// also returns user_sp_base and entry point.
pub fn from_elf(elf_data: &[u8]) -> (Self, usize, usize) { pub fn from_elf(elf_data: &[u8]) -> (Self, usize, usize) {
let mut memory_set = Self::new_bare(); let mut memory_set = Self::new_bare();
// map trampoline // map trampoline
@ -178,10 +176,10 @@ impl MemorySet {
); );
} }
} }
// map user stack with U flags
let max_end_va: VirtAddr = max_end_vpn.into(); let max_end_va: VirtAddr = max_end_vpn.into();
let mut user_stack_bottom: usize = max_end_va.into(); let mut user_stack_base: usize = max_end_va.into();
(memory_set, user_stack_bottom, elf.header.pt2.entry_point() as usize) user_stack_base += PAGE_SIZE;
(memory_set, user_stack_base, elf.header.pt2.entry_point() as usize)
} }
pub fn from_existed_user(user_space: &MemorySet) -> MemorySet { pub fn from_existed_user(user_space: &MemorySet) -> MemorySet {
let mut memory_set = Self::new_bare(); let mut memory_set = Self::new_bare();

View file

@ -252,4 +252,4 @@ impl Iterator for UserBufferIterator {
Some(r) Some(r)
} }
} }
} }

View file

@ -4,14 +4,14 @@ use crate::mm::{
translated_refmut, translated_refmut,
translated_str, translated_str,
}; };
use crate::task::{current_user_token, current_task}; use crate::task::{current_user_token, current_process};
use crate::fs::{make_pipe, OpenFlags, open_file}; use crate::fs::{make_pipe, OpenFlags, open_file};
use alloc::sync::Arc; use alloc::sync::Arc;
pub fn sys_write(fd: usize, buf: *const u8, len: usize) -> isize { pub fn sys_write(fd: usize, buf: *const u8, len: usize) -> isize {
let token = current_user_token(); let token = current_user_token();
let task = current_task().unwrap(); let process = current_process();
let inner = task.inner_exclusive_access(); let inner = process.inner_exclusive_access();
if fd >= inner.fd_table.len() { if fd >= inner.fd_table.len() {
return -1; return -1;
} }
@ -32,8 +32,8 @@ pub fn sys_write(fd: usize, buf: *const u8, len: usize) -> isize {
pub fn sys_read(fd: usize, buf: *const u8, len: usize) -> isize { pub fn sys_read(fd: usize, buf: *const u8, len: usize) -> isize {
let token = current_user_token(); let token = current_user_token();
let task = current_task().unwrap(); let process = current_process();
let inner = task.inner_exclusive_access(); let inner = process.inner_exclusive_access();
if fd >= inner.fd_table.len() { if fd >= inner.fd_table.len() {
return -1; return -1;
} }
@ -53,14 +53,14 @@ pub fn sys_read(fd: usize, buf: *const u8, len: usize) -> isize {
} }
pub fn sys_open(path: *const u8, flags: u32) -> isize { pub fn sys_open(path: *const u8, flags: u32) -> isize {
let task = current_task().unwrap(); let process = current_process();
let token = current_user_token(); let token = current_user_token();
let path = translated_str(token, path); let path = translated_str(token, path);
if let Some(inode) = open_file( if let Some(inode) = open_file(
path.as_str(), path.as_str(),
OpenFlags::from_bits(flags).unwrap() OpenFlags::from_bits(flags).unwrap()
) { ) {
let mut inner = task.inner_exclusive_access(); let mut inner = process.inner_exclusive_access();
let fd = inner.alloc_fd(); let fd = inner.alloc_fd();
inner.fd_table[fd] = Some(inode); inner.fd_table[fd] = Some(inode);
fd as isize fd as isize
@ -70,8 +70,8 @@ pub fn sys_open(path: *const u8, flags: u32) -> isize {
} }
pub fn sys_close(fd: usize) -> isize { pub fn sys_close(fd: usize) -> isize {
let task = current_task().unwrap(); let process = current_process();
let mut inner = task.inner_exclusive_access(); let mut inner = process.inner_exclusive_access();
if fd >= inner.fd_table.len() { if fd >= inner.fd_table.len() {
return -1; return -1;
} }
@ -83,9 +83,9 @@ pub fn sys_close(fd: usize) -> isize {
} }
pub fn sys_pipe(pipe: *mut usize) -> isize { pub fn sys_pipe(pipe: *mut usize) -> isize {
let task = current_task().unwrap(); let process = current_process();
let token = current_user_token(); let token = current_user_token();
let mut inner = task.inner_exclusive_access(); let mut inner = process.inner_exclusive_access();
let (pipe_read, pipe_write) = make_pipe(); let (pipe_read, pipe_write) = make_pipe();
let read_fd = inner.alloc_fd(); let read_fd = inner.alloc_fd();
inner.fd_table[read_fd] = Some(pipe_read); inner.fd_table[read_fd] = Some(pipe_read);
@ -97,8 +97,8 @@ pub fn sys_pipe(pipe: *mut usize) -> isize {
} }
pub fn sys_dup(fd: usize) -> isize { pub fn sys_dup(fd: usize) -> isize {
let task = current_task().unwrap(); let process = current_process();
let mut inner = task.inner_exclusive_access(); let mut inner = process.inner_exclusive_access();
if fd >= inner.fd_table.len() { if fd >= inner.fd_table.len() {
return -1; return -1;
} }
@ -108,4 +108,4 @@ pub fn sys_dup(fd: usize) -> isize {
let new_fd = inner.alloc_fd(); let new_fd = inner.alloc_fd();
inner.fd_table[new_fd] = Some(Arc::clone(inner.fd_table[fd].as_ref().unwrap())); inner.fd_table[new_fd] = Some(Arc::clone(inner.fd_table[fd].as_ref().unwrap()));
new_fd as isize new_fd as isize
} }

View file

@ -2,8 +2,8 @@ use crate::task::{
suspend_current_and_run_next, suspend_current_and_run_next,
exit_current_and_run_next, exit_current_and_run_next,
current_task, current_task,
current_process,
current_user_token, current_user_token,
add_task,
}; };
use crate::timer::get_time_ms; use crate::timer::get_time_ms;
use crate::mm::{ use crate::mm::{
@ -34,20 +34,20 @@ pub fn sys_get_time() -> isize {
} }
pub fn sys_getpid() -> isize { pub fn sys_getpid() -> isize {
current_task().unwrap().pid.0 as isize current_task().unwrap().process.upgrade().unwrap().getpid() as isize
} }
pub fn sys_fork() -> isize { pub fn sys_fork() -> isize {
let current_task = current_task().unwrap(); let current_process = current_process();
let new_task = current_task.fork(); let new_process = current_process.fork();
let new_pid = new_task.pid.0; let new_pid = new_process.getpid();
// modify trap context of new_task, because it returns immediately after switching // modify trap context of new_task, because it returns immediately after switching
let trap_cx = new_task.inner_exclusive_access().get_trap_cx(); let new_process_inner = new_process.inner_exclusive_access();
let task = new_process_inner.tasks[0].as_ref().unwrap();
let trap_cx = task.inner_exclusive_access().get_trap_cx();
// we do not have to move to next instruction since we have done it before // we do not have to move to next instruction since we have done it before
// for child process, fork returns 0 // for child process, fork returns 0
trap_cx.x[10] = 0; trap_cx.x[10] = 0;
// add new task to scheduler
add_task(new_task);
new_pid as isize new_pid as isize
} }
@ -65,9 +65,9 @@ pub fn sys_exec(path: *const u8, mut args: *const usize) -> isize {
} }
if let Some(app_inode) = open_file(path.as_str(), OpenFlags::RDONLY) { if let Some(app_inode) = open_file(path.as_str(), OpenFlags::RDONLY) {
let all_data = app_inode.read_all(); let all_data = app_inode.read_all();
let task = current_task().unwrap(); let process = current_process();
let argc = args_vec.len(); let argc = args_vec.len();
task.exec(all_data.as_slice(), args_vec); process.exec(all_data.as_slice(), args_vec);
// return argc because cx.x[10] will be covered with it later // return argc because cx.x[10] will be covered with it later
argc as isize argc as isize
} else { } else {
@ -78,11 +78,10 @@ pub fn sys_exec(path: *const u8, mut args: *const usize) -> isize {
/// If there is not a child process whose pid is same as given, return -1. /// If there is not a child process whose pid is same as given, return -1.
/// Else if there is a child process but it is still running, return -2. /// Else if there is a child process but it is still running, return -2.
pub fn sys_waitpid(pid: isize, exit_code_ptr: *mut i32) -> isize { pub fn sys_waitpid(pid: isize, exit_code_ptr: *mut i32) -> isize {
let task = current_task().unwrap(); let process = current_process();
// find a child process // find a child process
// ---- access current PCB exclusively let mut inner = process.inner_exclusive_access();
let mut inner = task.inner_exclusive_access();
if inner.children if inner.children
.iter() .iter()
.find(|p| {pid == -1 || pid as usize == p.getpid()}) .find(|p| {pid == -1 || pid as usize == p.getpid()})
@ -95,7 +94,7 @@ pub fn sys_waitpid(pid: isize, exit_code_ptr: *mut i32) -> isize {
.enumerate() .enumerate()
.find(|(_, p)| { .find(|(_, p)| {
// ++++ temporarily access child PCB exclusively // ++++ temporarily access child PCB exclusively
p.inner_exclusive_access().is_zombie() && (pid == -1 || pid as usize == p.getpid()) p.inner_exclusive_access().is_zombie && (pid == -1 || pid as usize == p.getpid())
// ++++ release child PCB // ++++ release child PCB
}); });
if let Some((idx, _)) = pair { if let Some((idx, _)) = pair {
@ -112,4 +111,4 @@ pub fn sys_waitpid(pid: isize, exit_code_ptr: *mut i32) -> isize {
-2 -2
} }
// ---- release current PCB automatically // ---- release current PCB automatically
} }

View file

@ -1,12 +1,8 @@
use alloc::{vec::Vec, sync::Arc}; use alloc::{vec::Vec, sync::Arc};
use lazy_static::*; use lazy_static::*;
use crate::sync::UPSafeCell; use crate::sync::UPSafeCell;
use crate::mm::{KERNEL_SPACE, MapPermission, VirtAddr}; use crate::mm::{KERNEL_SPACE, MapPermission, PhysPageNum, VirtAddr};
use crate::config::{ use crate::config::{KERNEL_STACK_SIZE, PAGE_SIZE, TRAMPOLINE, TRAP_CONTEXT_BASE, USER_STACK_SIZE};
PAGE_SIZE,
TRAMPOLINE,
KERNEL_STACK_SIZE,
};
use super::ProcessControlBlock; use super::ProcessControlBlock;
pub struct RecycleAllocator { pub struct RecycleAllocator {
@ -110,17 +106,104 @@ impl KernelStack {
pub struct TaskUserRes { pub struct TaskUserRes {
pub tid: usize, pub tid: usize,
pub ustack_base: usize,
pub kstack: KernelStack, pub kstack: KernelStack,
pub process: Arc<ProcessControlBlock>, pub process: Arc<ProcessControlBlock>,
} }
impl Drop for TaskUserRes { fn trap_cx_bottom_from_tid(tid: usize) -> usize {
fn drop(&mut self) { TRAP_CONTEXT_BASE - tid * PAGE_SIZE
}
fn ustack_bottom_from_tid(ustack_base: usize, tid: usize) -> usize {
ustack_base + tid * (PAGE_SIZE + USER_STACK_SIZE)
}
impl TaskUserRes {
pub fn new(
process: Arc<ProcessControlBlock>,
ustack_base: usize,
alloc_user_res: bool,
) -> Self {
let tid = process.inner_exclusive_access().alloc_tid();
let kstack = kstack_alloc();
let task_user_res = Self {
tid,
ustack_base,
kstack,
process: Arc::clone(&process),
};
if alloc_user_res {
task_user_res.alloc_user_res();
}
task_user_res
}
pub fn alloc_user_res(&self) {
let mut process = self.process.inner_exclusive_access();
// alloc user stack
let ustack_bottom = ustack_bottom_from_tid(self.ustack_base, self.tid);
let ustack_top = ustack_bottom + USER_STACK_SIZE;
process
.memory_set
.insert_framed_area(
ustack_bottom.into(),
ustack_top.into(),
MapPermission::R | MapPermission::W | MapPermission::U,
);
// alloc trap_cx
let trap_cx_bottom = trap_cx_bottom_from_tid(self.tid);
let trap_cx_top = trap_cx_bottom + PAGE_SIZE;
process
.memory_set
.insert_framed_area(
trap_cx_bottom.into(),
trap_cx_top.into(),
MapPermission::R | MapPermission::W,
);
}
pub fn dealloc_tid(&self) {
let mut process = self.process.inner_exclusive_access();
process.dealloc_tid(self.tid);
}
fn dealloc_user_res(&self) {
// dealloc tid // dealloc tid
let mut process = self.process.inner_exclusive_access();
process.dealloc_tid(self.tid);
// dealloc ustack manually
let ustack_bottom_va: VirtAddr = ustack_bottom_from_tid(self.ustack_base, self.tid).into();
process.memory_set.remove_area_with_start_vpn(ustack_bottom_va.into());
// dealloc trap_cx manually
let trap_cx_bottom_va: VirtAddr = trap_cx_bottom_from_tid(self.tid).into();
process.memory_set.remove_area_with_start_vpn(trap_cx_bottom_va.into());
}
pub fn trap_cx_user_va(&self) -> usize {
trap_cx_bottom_from_tid(self.tid)
}
pub fn trap_cx_ppn(&self) -> PhysPageNum {
let process = self.process.inner_exclusive_access(); let process = self.process.inner_exclusive_access();
process.task_res_allocator.dealloc(self.tid); let trap_cx_bottom_va: VirtAddr = trap_cx_bottom_from_tid(self.tid).into();
// dealloc trap_cx process.memory_set.translate(trap_cx_bottom_va.into()).unwrap().ppn()
process.dealloc_trap_cx(self.tid); }
// kstack can be deallocated automatically
pub fn ustack_base(&self) -> usize { self.ustack_base }
pub fn ustack_top(&self) -> usize {
ustack_bottom_from_tid(self.ustack_base, self.tid) + USER_STACK_SIZE
}
pub fn kstack_top(&self) -> usize {
self.kstack.get_top()
} }
} }
impl Drop for TaskUserRes {
fn drop(&mut self) {
self.dealloc_user_res();
// kstack can also be deallocated automatically
}
}

View file

@ -13,14 +13,16 @@ use alloc::sync::Arc;
use manager::fetch_task; use manager::fetch_task;
use lazy_static::*; use lazy_static::*;
use process::ProcessControlBlock; use process::ProcessControlBlock;
use id::RecycleAllocator;
pub use context::TaskContext; pub use context::TaskContext;
pub use processor::{ pub use processor::{
run_tasks, run_tasks,
current_task, current_task,
current_process,
current_user_token, current_user_token,
current_trap_cx_user_va,
current_trap_cx, current_trap_cx,
current_kstack_top,
take_current_task, take_current_task,
schedule, schedule,
}; };
@ -42,7 +44,7 @@ pub fn suspend_current_and_run_next() {
// Change status to Ready // Change status to Ready
task_inner.task_status = TaskStatus::Ready; task_inner.task_status = TaskStatus::Ready;
drop(task_inner); drop(task_inner);
// ---- release current PCB // ---- release current TCB
// push back to ready queue. // push back to ready queue.
add_task(task); add_task(task);
@ -53,30 +55,35 @@ pub fn suspend_current_and_run_next() {
pub fn exit_current_and_run_next(exit_code: i32) { pub fn exit_current_and_run_next(exit_code: i32) {
// take from Processor // take from Processor
let task = take_current_task().unwrap(); let task = take_current_task().unwrap();
// **** access current TCB exclusively let task_exit_code = task.inner_exclusive_access().exit_code;
let mut inner = task.inner_exclusive_access(); let tid = task.inner_exclusive_access().res.tid;
// Change status to Zombie // remove thread
inner.task_status = TaskStatus::Zombie; let process = task.process.upgrade().unwrap();
// Record exit code let mut process_inner = process.inner_exclusive_access();
inner.exit_code = exit_code; process_inner.tasks.drain(tid..tid + 1);
// do not move to its parent but under initproc // if this is the main thread of the process, then we need terminate this process
if tid == 0 {
// mark this process as a zombie process
process_inner.is_zombie = true;
// record exit code of main process
process_inner.exit_code = task_exit_code;
// ++++++ access initproc TCB exclusively {
{ // move all child processes under init process
let mut initproc_inner = INITPROC.inner_exclusive_access(); let mut initproc_inner = INITPROC.inner_exclusive_access();
for child in inner.children.iter() { for child in process_inner.children.iter() {
child.inner_exclusive_access().parent = Some(Arc::downgrade(&INITPROC)); child.inner_exclusive_access().parent = Some(Arc::downgrade(&INITPROC));
initproc_inner.children.push(child.clone()); initproc_inner.children.push(child.clone());
}
} }
}
// ++++++ release parent PCB
inner.children.clear(); process_inner.children.clear();
// deallocate user space // deallocate user space as soon as possible
inner.memory_set.recycle_data_pages(); process_inner.memory_set.recycle_data_pages();
drop(inner); }
// **** release current PCB // maintain rc of process manually since we will break this context soon
// drop task manually to maintain rc correctly drop(process_inner);
drop(process);
drop(task); drop(task);
// we do not have to save task context // we do not have to save task context
let mut _unused = TaskContext::zero_init(); let mut _unused = TaskContext::zero_init();
@ -84,13 +91,13 @@ pub fn exit_current_and_run_next(exit_code: i32) {
} }
lazy_static! { lazy_static! {
pub static ref INITPROC: Arc<TaskControlBlock> = Arc::new({ pub static ref INITPROC: Arc<ProcessControlBlock> = {
let inode = open_file("initproc", OpenFlags::RDONLY).unwrap(); let inode = open_file("initproc", OpenFlags::RDONLY).unwrap();
let v = inode.read_all(); let v = inode.read_all();
TaskControlBlock::new(v.as_slice()) ProcessControlBlock::new(v.as_slice())
}); };
} }
pub fn add_initproc() { pub fn add_initproc() {
add_task(INITPROC.clone()); let initproc = INITPROC.clone();
} }

View file

@ -1,16 +1,17 @@
use crate::mm::{ use crate::mm::{
MemorySet, MemorySet,
KERNEL_SPACE, KERNEL_SPACE,
VirtAddr,
translated_refmut, translated_refmut,
}; };
use crate::task::TaskContext;
use crate::task::id::TaskUserRes;
use crate::trap::{TrapContext, trap_handler}; use crate::trap::{TrapContext, trap_handler};
use crate::config::TRAP_CONTEXT;
use crate::sync::UPSafeCell; use crate::sync::UPSafeCell;
use core::cell::RefMut; use core::cell::RefMut;
use super::id::RecycleAllocator; use super::id::RecycleAllocator;
use super::{TaskContext, TaskControlBlock}; use super::TaskControlBlock;
use super::{PidHandle, pid_alloc, KernelStack, kstack_alloc}; use super::{PidHandle, pid_alloc};
use super::add_task;
use alloc::sync::{Weak, Arc}; use alloc::sync::{Weak, Arc};
use alloc::vec; use alloc::vec;
use alloc::vec::Vec; use alloc::vec::Vec;
@ -25,14 +26,13 @@ pub struct ProcessControlBlock {
} }
pub struct ProcessControlBlockInner { pub struct ProcessControlBlockInner {
pub base_size: usize,
pub is_zombie: bool, pub is_zombie: bool,
pub memory_set: MemorySet, pub memory_set: MemorySet,
pub parent: Option<Weak<ProcessControlBlock>>, pub parent: Option<Weak<ProcessControlBlock>>,
pub children: Vec<Arc<ProcessControlBlock>>, pub children: Vec<Arc<ProcessControlBlock>>,
pub exit_code: i32, pub exit_code: i32,
pub fd_table: Vec<Option<Arc<dyn File + Send + Sync>>>, pub fd_table: Vec<Option<Arc<dyn File + Send + Sync>>>,
pub tasks: Vec<Option<Weak<TaskControlBlock>>>, pub tasks: Vec<Option<Arc<TaskControlBlock>>>,
pub task_res_allocator: RecycleAllocator, pub task_res_allocator: RecycleAllocator,
} }
@ -40,6 +40,7 @@ impl ProcessControlBlockInner {
pub fn get_user_token(&self) -> usize { pub fn get_user_token(&self) -> usize {
self.memory_set.token() self.memory_set.token()
} }
pub fn alloc_fd(&mut self) -> usize { pub fn alloc_fd(&mut self) -> usize {
if let Some(fd) = (0..self.fd_table.len()) if let Some(fd) = (0..self.fd_table.len())
.find(|fd| self.fd_table[*fd].is_none()) { .find(|fd| self.fd_table[*fd].is_none()) {
@ -49,9 +50,21 @@ impl ProcessControlBlockInner {
self.fd_table.len() - 1 self.fd_table.len() - 1
} }
} }
pub fn dealloc_trap_cx(&mut self, tid: usize) {
unimplemented!(); pub fn alloc_tid(&mut self) -> usize {
//self.memory_set.remove_area_with_start_vpn() self.task_res_allocator.alloc()
}
pub fn dealloc_tid(&mut self, tid: usize){
self.task_res_allocator.dealloc(tid)
}
pub fn thread_count(&self) -> usize {
self.tasks.len()
}
pub fn get_task(&self, tid: usize) -> Arc<TaskControlBlock> {
self.tasks[tid].as_ref().unwrap().clone()
} }
} }
@ -60,25 +73,15 @@ impl ProcessControlBlock {
self.inner.exclusive_access() self.inner.exclusive_access()
} }
pub fn new(elf_data: &[u8]) -> Self { pub fn new(elf_data: &[u8]) -> Arc<Self> {
// memory_set with elf program headers/trampoline/trap context/user stack // memory_set with elf program headers/trampoline/trap context/user stack
let (memory_set, user_sp, entry_point) = MemorySet::from_elf(elf_data); let (memory_set, ustack_base, entry_point) = MemorySet::from_elf(elf_data);
let trap_cx_ppn = memory_set // allocate a pid
.translate(VirtAddr::from(TRAP_CONTEXT).into())
.unwrap()
.ppn();
// alloc a pid and a kernel stack in kernel space
let pid_handle = pid_alloc(); let pid_handle = pid_alloc();
let kstack = kstack_alloc(); let process = Arc::new(Self {
let kernel_stack_top = kstack.get_top();
let task_control_block = Self {
pid: pid_handle, pid: pid_handle,
kernel_stack, inner: unsafe { UPSafeCell::new(ProcessControlBlockInner {
inner: unsafe { UPSafeCell::new(TaskControlBlockInner { is_zombie: false,
trap_cx_ppn,
base_size: user_sp,
task_cx: TaskContext::goto_trap_return(kernel_stack_top),
task_status: TaskStatus::Ready,
memory_set, memory_set,
parent: None, parent: None,
children: Vec::new(), children: Vec::new(),
@ -91,33 +94,61 @@ impl ProcessControlBlock {
// 2 -> stderr // 2 -> stderr
Some(Arc::new(Stdout)), Some(Arc::new(Stdout)),
], ],
})}, tasks: Vec::new(),
}; task_res_allocator: RecycleAllocator::new(),
// prepare TrapContext in user space })}
let trap_cx = task_control_block.inner_exclusive_access().get_trap_cx(); });
// create a main thread, we should allocate ustack and trap_cx here
let task = Arc::new(TaskControlBlock::new(
Arc::clone(&process),
ustack_base,
true,
));
// prepare trap_cx of main thread
let task_inner = task.inner_exclusive_access();
let trap_cx = task_inner.get_trap_cx();
let ustack_top = task_inner.res.ustack_top();
let kstack_top = task_inner.res.kstack_top();
drop(task_inner);
*trap_cx = TrapContext::app_init_context( *trap_cx = TrapContext::app_init_context(
entry_point, entry_point,
user_sp, ustack_top,
KERNEL_SPACE.exclusive_access().token(), KERNEL_SPACE.exclusive_access().token(),
kernel_stack_top, kstack_top,
trap_handler as usize, trap_handler as usize,
); );
task_control_block // add main thread to the process
let mut process_inner = process.inner_exclusive_access();
process_inner.tasks.push(Some(Arc::clone(&task)));
drop(process_inner);
// add main thread to scheduler
add_task(task);
process
} }
pub fn exec(&self, elf_data: &[u8], args: Vec<String>) {
/// Only support processes with a single thread.
pub fn exec(self: &Arc<Self>, elf_data: &[u8], args: Vec<String>) {
assert_eq!(self.inner_exclusive_access().thread_count(), 1);
// memory_set with elf program headers/trampoline/trap context/user stack // memory_set with elf program headers/trampoline/trap context/user stack
let (memory_set, mut user_sp, entry_point) = MemorySet::from_elf(elf_data); let (memory_set, ustack_base, entry_point) = MemorySet::from_elf(elf_data);
let trap_cx_ppn = memory_set let new_token = memory_set.token();
.translate(VirtAddr::from(TRAP_CONTEXT).into()) // substitute memory_set
.unwrap() self.inner_exclusive_access().memory_set = memory_set;
.ppn(); // then we alloc user resource for main thread again
// since memory_set has been changed
let task = self.inner_exclusive_access().get_task(0);
let mut task_inner = task.inner_exclusive_access();
task_inner.res.dealloc_tid();
task_inner.res.ustack_base = ustack_base;
task_inner.res.alloc_user_res();
// push arguments on user stack // push arguments on user stack
let mut user_sp = task_inner.res.ustack_top();
user_sp -= (args.len() + 1) * core::mem::size_of::<usize>(); user_sp -= (args.len() + 1) * core::mem::size_of::<usize>();
let argv_base = user_sp; let argv_base = user_sp;
let mut argv: Vec<_> = (0..=args.len()) let mut argv: Vec<_> = (0..=args.len())
.map(|arg| { .map(|arg| {
translated_refmut( translated_refmut(
memory_set.token(), new_token,
(argv_base + arg * core::mem::size_of::<usize>()) as *mut usize (argv_base + arg * core::mem::size_of::<usize>()) as *mut usize
) )
}) })
@ -128,86 +159,84 @@ impl ProcessControlBlock {
*argv[i] = user_sp; *argv[i] = user_sp;
let mut p = user_sp; let mut p = user_sp;
for c in args[i].as_bytes() { for c in args[i].as_bytes() {
*translated_refmut(memory_set.token(), p as *mut u8) = *c; *translated_refmut(new_token, p as *mut u8) = *c;
p += 1; p += 1;
} }
*translated_refmut(memory_set.token(), p as *mut u8) = 0; *translated_refmut(new_token, p as *mut u8) = 0;
} }
// make the user_sp aligned to 8B for k210 platform // make the user_sp aligned to 8B for k210 platform
user_sp -= user_sp % core::mem::size_of::<usize>(); user_sp -= user_sp % core::mem::size_of::<usize>();
// **** access current TCB exclusively
let mut inner = self.inner_exclusive_access();
// substitute memory_set
inner.memory_set = memory_set;
// update trap_cx ppn
inner.trap_cx_ppn = trap_cx_ppn;
// initialize trap_cx // initialize trap_cx
let mut trap_cx = TrapContext::app_init_context( let mut trap_cx = TrapContext::app_init_context(
entry_point, entry_point,
user_sp, user_sp,
KERNEL_SPACE.exclusive_access().token(), KERNEL_SPACE.exclusive_access().token(),
self.kernel_stack.get_top(), task_inner.res.kstack_top(),
trap_handler as usize, trap_handler as usize,
); );
trap_cx.x[10] = args.len(); trap_cx.x[10] = args.len();
trap_cx.x[11] = argv_base; trap_cx.x[11] = argv_base;
*inner.get_trap_cx() = trap_cx; *task_inner.get_trap_cx() = trap_cx;
// **** release current PCB task_inner.task_cx = TaskContext::goto_trap_return(task_inner.res.kstack_top());
} }
pub fn fork(self: &Arc<TaskControlBlock>) -> Arc<TaskControlBlock> {
// ---- hold parent PCB lock /// Only support processes with a single thread.
let mut parent_inner = self.inner_exclusive_access(); pub fn fork(self: &Arc<Self>) -> Arc<Self> {
// copy user space(include trap context) let mut parent = self.inner_exclusive_access();
let memory_set = MemorySet::from_existed_user( assert_eq!(parent.thread_count(), 1);
&parent_inner.memory_set // clone parent's memory_set completely including trampoline/ustacks/trap_cxs
); let memory_set = MemorySet::from_existed_user(&parent.memory_set);
let trap_cx_ppn = memory_set // alloc a pid
.translate(VirtAddr::from(TRAP_CONTEXT).into()) let pid = pid_alloc();
.unwrap()
.ppn();
// alloc a pid and a kernel stack in kernel space
let pid_handle = pid_alloc();
let kernel_stack = KernelStack::new(&pid_handle);
let kernel_stack_top = kernel_stack.get_top();
// copy fd table // copy fd table
let mut new_fd_table: Vec<Option<Arc<dyn File + Send + Sync>>> = Vec::new(); let mut new_fd_table: Vec<Option<Arc<dyn File + Send + Sync>>> = Vec::new();
for fd in parent_inner.fd_table.iter() { for fd in parent.fd_table.iter() {
if let Some(file) = fd { if let Some(file) = fd {
new_fd_table.push(Some(file.clone())); new_fd_table.push(Some(file.clone()));
} else { } else {
new_fd_table.push(None); new_fd_table.push(None);
} }
} }
let task_control_block = Arc::new(TaskControlBlock { // create child process pcb
pid: pid_handle, let child = Arc::new(Self {
kernel_stack, pid,
inner: unsafe { UPSafeCell::new(TaskControlBlockInner { inner: unsafe { UPSafeCell::new(ProcessControlBlockInner {
trap_cx_ppn, is_zombie: false,
base_size: parent_inner.base_size, memory_set,
task_cx: TaskContext::goto_trap_return(kernel_stack_top), parent: Some(Arc::downgrade(self)),
task_status: TaskStatus::Ready, children: Vec::new(),
memory_set, exit_code: 0,
parent: Some(Arc::downgrade(self)), fd_table: new_fd_table,
children: Vec::new(), tasks: Vec::new(),
exit_code: 0, task_res_allocator: RecycleAllocator::new(),
fd_table: new_fd_table, })}
})},
}); });
// add child // add child
parent_inner.children.push(task_control_block.clone()); parent.children.push(Arc::clone(&child));
// modify kernel_sp in trap_cx // create main thread of child process
// **** access child PCB exclusively let task = Arc::new(TaskControlBlock::new(
let trap_cx = task_control_block.inner_exclusive_access().get_trap_cx(); Arc::clone(&child),
trap_cx.kernel_sp = kernel_stack_top; parent.get_task(0).inner_exclusive_access().res.ustack_base(),
// return // here we do not allocate trap_cx or ustack again
task_control_block // but mention that we allocate a new kstack here
// **** release child PCB false,
// ---- release parent PCB ));
// attach task to child process
let mut child_inner = child.inner_exclusive_access();
child_inner.tasks.push(Some(Arc::clone(&task)));
drop(child_inner);
// modify kstack_top in trap_cx of this thread
let task_inner = task.inner_exclusive_access();
let trap_cx = task_inner.get_trap_cx();
trap_cx.kernel_sp = task_inner.res.kstack_top();
drop(task_inner);
// add this thread to scheduler
add_task(task);
child
} }
pub fn getpid(&self) -> usize { pub fn getpid(&self) -> usize {
self.pid.0 self.pid.0
} }
} }

View file

@ -1,4 +1,4 @@
use super::{TaskContext, TaskControlBlock}; use super::{TaskContext, TaskControlBlock, ProcessControlBlock};
use alloc::sync::Arc; use alloc::sync::Arc;
use lazy_static::*; use lazy_static::*;
use super::{fetch_task, TaskStatus}; use super::{fetch_task, TaskStatus};
@ -55,6 +55,8 @@ pub fn run_tasks() {
next_task_cx_ptr, next_task_cx_ptr,
); );
} }
} else {
println!("no tasks available in run_tasks");
} }
} }
} }
@ -67,9 +69,13 @@ pub fn current_task() -> Option<Arc<TaskControlBlock>> {
PROCESSOR.exclusive_access().current() PROCESSOR.exclusive_access().current()
} }
pub fn current_process() -> Arc<ProcessControlBlock> {
current_task().unwrap().process.upgrade().unwrap()
}
pub fn current_user_token() -> usize { pub fn current_user_token() -> usize {
let task = current_task().unwrap(); let task = current_task().unwrap();
let token = task.inner_exclusive_access().get_user_token(); let token = task.get_user_token();
token token
} }
@ -77,6 +83,14 @@ pub fn current_trap_cx() -> &'static mut TrapContext {
current_task().unwrap().inner_exclusive_access().get_trap_cx() current_task().unwrap().inner_exclusive_access().get_trap_cx()
} }
pub fn current_trap_cx_user_va() -> usize {
current_task().unwrap().inner_exclusive_access().res.trap_cx_user_va()
}
pub fn current_kstack_top() -> usize {
current_task().unwrap().inner_exclusive_access().res.kstack_top()
}
pub fn schedule(switched_task_cx_ptr: *mut TaskContext) { pub fn schedule(switched_task_cx_ptr: *mut TaskContext) {
let mut processor = PROCESSOR.exclusive_access(); let mut processor = PROCESSOR.exclusive_access();
let idle_task_cx_ptr = processor.get_idle_task_cx_ptr(); let idle_task_cx_ptr = processor.get_idle_task_cx_ptr();

View file

@ -1,22 +1,35 @@
use alloc::sync::Arc; use alloc::sync::{Arc, Weak};
use crate::{mm::PhysPageNum, sync::UPSafeCell}; use crate::{mm::PhysPageNum, sync::UPSafeCell};
use crate::trap::TrapContext; use crate::trap::TrapContext;
use super::id::TaskUserRes;
use super::{ use super::{
KernelStack,
ProcessControlBlock, ProcessControlBlock,
TaskContext TaskContext
}; };
use core::cell::RefMut;
/// Task (thread) control block — one per thread of a process.
pub struct TaskControlBlock {
    // immutable
    // Back-reference to the owning process; `Weak` so the process/task
    // pair does not form an `Arc` reference cycle.
    pub process: Weak<ProcessControlBlock>,
    // mutable
    // Inner mutable state, wrapped for exclusive access.
    inner: UPSafeCell<TaskControlBlockInner>,
}
impl TaskControlBlock {
    /// Exclusively borrow the mutable inner state of this task.
    pub fn inner_exclusive_access(&self) -> RefMut<'_, TaskControlBlockInner> {
        self.inner.exclusive_access()
    }
    /// Page-table token of the owning process's address space.
    ///
    /// Panics if the owning process has already been dropped.
    pub fn get_user_token(&self) -> usize {
        self.process
            .upgrade()
            .unwrap()
            .inner_exclusive_access()
            .memory_set
            .token()
    }
}
pub struct TaskControlBlockInner { pub struct TaskControlBlockInner {
pub res: TaskUserRes,
pub trap_cx_ppn: PhysPageNum, pub trap_cx_ppn: PhysPageNum,
pub task_cx: TaskContext, pub task_cx: TaskContext,
pub task_status: TaskStatus, pub task_status: TaskStatus,
@ -27,11 +40,37 @@ impl TaskControlBlockInner {
/// Mutable reference to this task's trap context, read through the
/// physical page number recorded in `trap_cx_ppn`.
pub fn get_trap_cx(&self) -> &'static mut TrapContext {
    self.trap_cx_ppn.get_mut()
}
/// Current scheduling status of this task.
fn get_status(&self) -> TaskStatus {
    self.task_status
}
} }
impl TaskControlBlock {
    /// Create a new task (thread) belonging to `process`.
    ///
    /// * `ustack_base` — base address from which this thread's user stack
    ///   is placed (via `TaskUserRes`).
    /// * `alloc_user_res` — whether to allocate the user-space resources
    ///   (user stack and trap-context page) immediately.
    ///
    /// The task starts in `Ready` state; its task context is set up to
    /// enter user space through `trap_return` on the thread's kernel stack.
    pub fn new(
        process: Arc<ProcessControlBlock>,
        ustack_base: usize,
        alloc_user_res: bool
    ) -> Self {
        let res = TaskUserRes::new(Arc::clone(&process), ustack_base, alloc_user_res);
        let trap_cx_ppn = res.trap_cx_ppn();
        let kstack_top = res.kstack_top();
        Self {
            // Weak back-reference: the process owns its tasks, not vice versa.
            process: Arc::downgrade(&process),
            // SAFETY: UPSafeCell appears to require uniprocessor,
            // exclusive-access usage — assumed upheld kernel-wide;
            // TODO confirm against UPSafeCell's documented contract.
            inner: unsafe { UPSafeCell::new(
                TaskControlBlockInner {
                    res,
                    trap_cx_ppn,
                    task_cx: TaskContext::goto_trap_return(kstack_top),
                    task_status: TaskStatus::Ready,
                    exit_code: 0,
                }
            )},
        }
    }
}
#[derive(Copy, Clone, PartialEq)] #[derive(Copy, Clone, PartialEq)]
pub enum TaskStatus { pub enum TaskStatus {
Ready, Ready,

View file

@ -18,9 +18,10 @@ use crate::task::{
suspend_current_and_run_next, suspend_current_and_run_next,
current_user_token, current_user_token,
current_trap_cx, current_trap_cx,
current_trap_cx_user_va,
}; };
use crate::timer::set_next_trigger; use crate::timer::set_next_trigger;
use crate::config::{TRAP_CONTEXT, TRAMPOLINE}; use crate::config::TRAMPOLINE;
global_asm!(include_str!("trap.S")); global_asm!(include_str!("trap.S"));
@ -46,6 +47,7 @@ pub fn enable_timer_interrupt() {
#[no_mangle] #[no_mangle]
pub fn trap_handler() -> ! { pub fn trap_handler() -> ! {
println!("into trap!");
set_kernel_trap_entry(); set_kernel_trap_entry();
let scause = scause::read(); let scause = scause::read();
let stval = stval::read(); let stval = stval::read();
@ -53,6 +55,7 @@ pub fn trap_handler() -> ! {
Trap::Exception(Exception::UserEnvCall) => { Trap::Exception(Exception::UserEnvCall) => {
// jump to next instruction anyway // jump to next instruction anyway
let mut cx = current_trap_cx(); let mut cx = current_trap_cx();
println!("syscall #{}", cx.x[17]);
cx.sepc += 4; cx.sepc += 4;
// get system call return value // get system call return value
let result = syscall(cx.x[17], [cx.x[10], cx.x[11], cx.x[12]]); let result = syscall(cx.x[17], [cx.x[10], cx.x[11], cx.x[12]]);
@ -88,15 +91,16 @@ pub fn trap_handler() -> ! {
panic!("Unsupported trap {:?}, stval = {:#x}!", scause.cause(), stval); panic!("Unsupported trap {:?}, stval = {:#x}!", scause.cause(), stval);
} }
} }
//println!("before trap_return");
trap_return(); trap_return();
} }
#[no_mangle] #[no_mangle]
pub fn trap_return() -> ! { pub fn trap_return() -> ! {
println!("into trap_return!");
set_user_trap_entry(); set_user_trap_entry();
let trap_cx_ptr = TRAP_CONTEXT; let trap_cx_user_va = current_trap_cx_user_va();
let user_satp = current_user_token(); let user_satp = current_user_token();
println!("trap_cx = {:#x}, user_satp = {:#x}", trap_cx_user_va, user_satp);
extern "C" { extern "C" {
fn __alltraps(); fn __alltraps();
fn __restore(); fn __restore();
@ -107,7 +111,7 @@ pub fn trap_return() -> ! {
"fence.i", "fence.i",
"jr {restore_va}", "jr {restore_va}",
restore_va = in(reg) restore_va, restore_va = in(reg) restore_va,
in("a0") trap_cx_ptr, in("a0") trap_cx_user_va,
in("a1") user_satp, in("a1") user_satp,
options(noreturn) options(noreturn)
); );

View file

@ -13,6 +13,7 @@ use user_lib::{
#[no_mangle] #[no_mangle]
fn main() -> i32 { fn main() -> i32 {
println!("start initproc!");
if fork() == 0 { if fork() == 0 {
exec("user_shell\0", &[0 as *const u8]); exec("user_shell\0", &[0 as *const u8]);
} else { } else {
@ -31,4 +32,4 @@ fn main() -> i32 {
} }
} }
0 0
} }