Remove spin::Mutex except for easy-fs, add new test huge_write, and flush the cache to disk after a write transaction

Yifan Wu 2021-07-21 19:10:04 +08:00
parent 569e2fe2fe
commit b8a14182cd
36 changed files with 339 additions and 229 deletions

.gitignore

@ -2,6 +2,7 @@
os/target/* os/target/*
os/.idea/* os/.idea/*
os/src/link_app.S os/src/link_app.S
os/last-*
os/Cargo.lock os/Cargo.lock
user/target/* user/target/*
user/.idea/* user/.idea/*


@ -56,13 +56,13 @@ fn easy_fs_pack() -> std::io::Result<()> {
.write(true) .write(true)
.create(true) .create(true)
.open(format!("{}{}", target_path, "fs.img"))?; .open(format!("{}{}", target_path, "fs.img"))?;
f.set_len(8192 * 512).unwrap(); f.set_len(128 * 2048 * 512).unwrap();
f f
}))); })));
// 4MiB, at most 4095 files // 128MiB, at most 4095 files
let efs = EasyFileSystem::create( let efs = EasyFileSystem::create(
block_file.clone(), block_file.clone(),
8192, 128 * 2048,
1, 1,
); );
let root_inode = Arc::new(EasyFileSystem::root_inode(&efs)); let root_inode = Arc::new(EasyFileSystem::root_inode(&efs));


@ -126,3 +126,10 @@ pub fn get_block_cache(
) -> Arc<Mutex<BlockCache>> { ) -> Arc<Mutex<BlockCache>> {
BLOCK_CACHE_MANAGER.lock().get_block_cache(block_id, block_device) BLOCK_CACHE_MANAGER.lock().get_block_cache(block_id, block_device)
} }
pub fn block_cache_sync_all() {
let manager = BLOCK_CACHE_MANAGER.lock();
for (_, cache) in manager.queue.iter() {
cache.lock().sync();
}
}
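
`block_cache_sync_all` simply walks every block currently held by the cache manager and asks it to write itself back. A minimal sketch of the `sync` it relies on, assuming `BlockCache` keeps a `modified` flag and an `Arc<dyn BlockDevice>` (the field names here are assumptions; only the `BlockDevice` trait methods appear elsewhere in this commit):

use alloc::sync::Arc;

pub trait BlockDevice: Send + Sync {
    fn read_block(&self, block_id: usize, buf: &mut [u8]);
    fn write_block(&self, block_id: usize, buf: &[u8]);
}

pub struct BlockCache {
    cache: [u8; 512],                   // one sector worth of data
    block_id: usize,                    // which block this entry caches
    block_device: Arc<dyn BlockDevice>,
    modified: bool,                     // set on modify(), cleared on sync()
}

impl BlockCache {
    /// Write the cached sector back to disk, but only if it was modified.
    pub fn sync(&mut self) {
        if self.modified {
            self.modified = false;
            self.block_device.write_block(self.block_id, &self.cache);
        }
    }
}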


@ -8,6 +8,7 @@ use super::{
DiskInodeType, DiskInodeType,
Inode, Inode,
get_block_cache, get_block_cache,
block_cache_sync_all,
}; };
use crate::BLOCK_SZ; use crate::BLOCK_SZ;
@ -82,6 +83,7 @@ impl EasyFileSystem {
.modify(root_inode_offset, |disk_inode: &mut DiskInode| { .modify(root_inode_offset, |disk_inode: &mut DiskInode| {
disk_inode.initialize(DiskInodeType::Directory); disk_inode.initialize(DiskInodeType::Directory);
}); });
block_cache_sync_all();
Arc::new(Mutex::new(efs)) Arc::new(Mutex::new(efs))
} }


@ -416,7 +416,7 @@ impl DirEntry {
} }
pub fn new(name: &str, inode_number: u32) -> Self { pub fn new(name: &str, inode_number: u32) -> Self {
let mut bytes = [0u8; NAME_LENGTH_LIMIT + 1]; let mut bytes = [0u8; NAME_LENGTH_LIMIT + 1];
&mut bytes[..name.len()].copy_from_slice(name.as_bytes()); bytes[..name.len()].copy_from_slice(name.as_bytes());
Self { Self {
name: bytes, name: bytes,
inode_number, inode_number,


@ -15,4 +15,4 @@ pub use efs::EasyFileSystem;
pub use vfs::Inode; pub use vfs::Inode;
use layout::*; use layout::*;
use bitmap::Bitmap; use bitmap::Bitmap;
use block_cache::get_block_cache; use block_cache::{get_block_cache, block_cache_sync_all};


@ -6,6 +6,7 @@ use super::{
EasyFileSystem, EasyFileSystem,
DIRENT_SZ, DIRENT_SZ,
get_block_cache, get_block_cache,
block_cache_sync_all,
}; };
use alloc::sync::Arc; use alloc::sync::Arc;
use alloc::string::String; use alloc::string::String;
@ -145,6 +146,7 @@ impl Inode {
}); });
let (block_id, block_offset) = fs.get_disk_inode_pos(new_inode_id); let (block_id, block_offset) = fs.get_disk_inode_pos(new_inode_id);
block_cache_sync_all();
// return inode // return inode
Some(Arc::new(Self::new( Some(Arc::new(Self::new(
block_id, block_id,
@ -185,10 +187,12 @@ impl Inode {
pub fn write_at(&self, offset: usize, buf: &[u8]) -> usize { pub fn write_at(&self, offset: usize, buf: &[u8]) -> usize {
let mut fs = self.fs.lock(); let mut fs = self.fs.lock();
self.modify_disk_inode(|disk_inode| { let size = self.modify_disk_inode(|disk_inode| {
self.increase_size((offset + buf.len()) as u32, disk_inode, &mut fs); self.increase_size((offset + buf.len()) as u32, disk_inode, &mut fs);
disk_inode.write_at(offset, buf, &self.block_device) disk_inode.write_at(offset, buf, &self.block_device)
}) });
block_cache_sync_all();
size
} }
pub fn clear(&self) { pub fn clear(&self) {
@ -201,5 +205,6 @@ impl Inode {
fs.dealloc_data(data_block); fs.dealloc_data(data_block);
} }
}); });
block_cache_sync_all();
} }
} }


@ -10,7 +10,6 @@ edition = "2018"
riscv = { git = "https://github.com/rcore-os/riscv", features = ["inline-asm"] } riscv = { git = "https://github.com/rcore-os/riscv", features = ["inline-asm"] }
lazy_static = { version = "1.4.0", features = ["spin_no_std"] } lazy_static = { version = "1.4.0", features = ["spin_no_std"] }
buddy_system_allocator = "0.6" buddy_system_allocator = "0.6"
spin = "0.7.0"
bitflags = "1.2.1" bitflags = "1.2.1"
xmas-elf = "0.7.0" xmas-elf = "0.7.0"
virtio-drivers = { git = "https://github.com/rcore-os/virtio-drivers" } virtio-drivers = { git = "https://github.com/rcore-os/virtio-drivers" }


@ -32,7 +32,14 @@ OBJCOPY := rust-objcopy --binary-architecture=riscv64
# Disassembly # Disassembly
DISASM ?= -x DISASM ?= -x
build: env $(KERNEL_BIN) $(FS_IMG) build: env switch-check $(KERNEL_BIN) fs-img
switch-check:
ifeq ($(BOARD), qemu)
(which last-qemu) || (rm last-k210 -f && touch last-qemu && make clean)
else ifeq ($(BOARD), k210)
(which last-k210) || (rm last-qemu -f && touch last-k210 && make clean)
endif
env: env:
(rustup target list | grep "riscv64gc-unknown-none-elf (installed)") || rustup target add $(TARGET) (rustup target list | grep "riscv64gc-unknown-none-elf (installed)") || rustup target add $(TARGET)
@ -40,16 +47,17 @@ env:
rustup component add rust-src rustup component add rust-src
rustup component add llvm-tools-preview rustup component add llvm-tools-preview
sdcard: $(FS_IMG) sdcard: fs-img
@echo "Are you sure write to $(SDCARD) ? [y/N] " && read ans && [ $${ans:-N} = y ] @echo "Are you sure write to $(SDCARD) ? [y/N] " && read ans && [ $${ans:-N} = y ]
@sudo dd if=/dev/zero of=$(SDCARD) bs=1048576 count=16 @sudo dd if=/dev/zero of=$(SDCARD) bs=1048576 count=256
@sudo dd if=$(FS_IMG) of=$(SDCARD) @sudo dd if=$(FS_IMG) of=$(SDCARD)
$(KERNEL_BIN): kernel $(KERNEL_BIN): kernel
@$(OBJCOPY) $(KERNEL_ELF) --strip-all -O binary $@ @$(OBJCOPY) $(KERNEL_ELF) --strip-all -O binary $@
$(FS_IMG): $(APPS) fs-img: $(APPS)
@cd ../user && make build @cd ../user && make build
@rm $(FS_IMG)
@cd ../easy-fs-fuse && cargo run --release -- -s ../user/src/bin/ -t ../user/target/riscv64gc-unknown-none-elf/release/ @cd ../easy-fs-fuse && cargo run --release -- -s ../user/src/bin/ -t ../user/target/riscv64gc-unknown-none-elf/release/
$(APPS): $(APPS):
@ -73,8 +81,6 @@ disasm-vim: kernel
run: run-inner run: run-inner
run-inner: build run-inner: build
ifeq ($(BOARD),qemu) ifeq ($(BOARD),qemu)
@qemu-system-riscv64 \ @qemu-system-riscv64 \
@ -100,4 +106,4 @@ debug: build
tmux split-window -h "riscv64-unknown-elf-gdb -ex 'file $(KERNEL_ELF)' -ex 'set arch riscv:rv64' -ex 'target remote localhost:1234'" && \ tmux split-window -h "riscv64-unknown-elf-gdb -ex 'file $(KERNEL_ELF)' -ex 'set arch riscv:rv64' -ex 'target remote localhost:1234'" && \
tmux -2 attach-session -d tmux -2 attach-session -d
.PHONY: build env kernel clean disasm disasm-vim run-inner .PHONY: build env kernel clean disasm disasm-vim run-inner switch-check fs-img


@ -13,7 +13,7 @@ use k210_soc::{
sysctl, sysctl,
sleep::usleep, sleep::usleep,
}; };
use spin::Mutex; use crate::sync::UPSafeCell;
use lazy_static::*; use lazy_static::*;
use super::BlockDevice; use super::BlockDevice;
use core::convert::TryInto; use core::convert::TryInto;
@ -711,7 +711,9 @@ fn io_init() {
} }
lazy_static! { lazy_static! {
static ref PERIPHERALS: Mutex<Peripherals> = Mutex::new(Peripherals::take().unwrap()); static ref PERIPHERALS: UPSafeCell<Peripherals> = unsafe {
UPSafeCell::new(Peripherals::take().unwrap())
};
} }
fn init_sdcard() -> SDCard<SPIImpl<SPI0>> { fn init_sdcard() -> SDCard<SPIImpl<SPI0>> {
@ -735,19 +737,19 @@ fn init_sdcard() -> SDCard<SPIImpl<SPI0>> {
sd sd
} }
pub struct SDCardWrapper(Mutex<SDCard<SPIImpl<SPI0>>>); pub struct SDCardWrapper(UPSafeCell<SDCard<SPIImpl<SPI0>>>);
impl SDCardWrapper { impl SDCardWrapper {
pub fn new() -> Self { pub fn new() -> Self {
Self(Mutex::new(init_sdcard())) unsafe { Self(UPSafeCell::new(init_sdcard())) }
} }
} }
impl BlockDevice for SDCardWrapper { impl BlockDevice for SDCardWrapper {
fn read_block(&self, block_id: usize, buf: &mut [u8]) { fn read_block(&self, block_id: usize, buf: &mut [u8]) {
self.0.lock().read_sector(buf,block_id as u32).unwrap(); self.0.exclusive_access().read_sector(buf,block_id as u32).unwrap();
} }
fn write_block(&self, block_id: usize, buf: &[u8]) { fn write_block(&self, block_id: usize, buf: &[u8]) {
self.0.lock().write_sector(buf,block_id as u32).unwrap(); self.0.exclusive_access().write_sector(buf,block_id as u32).unwrap();
} }
} }


@ -12,34 +12,42 @@ use crate::mm::{
kernel_token, kernel_token,
}; };
use super::BlockDevice; use super::BlockDevice;
use spin::Mutex; use crate::sync::UPSafeCell;
use alloc::vec::Vec; use alloc::vec::Vec;
use lazy_static::*; use lazy_static::*;
#[allow(unused)] #[allow(unused)]
const VIRTIO0: usize = 0x10001000; const VIRTIO0: usize = 0x10001000;
pub struct VirtIOBlock(Mutex<VirtIOBlk<'static>>); pub struct VirtIOBlock(UPSafeCell<VirtIOBlk<'static>>);
lazy_static! { lazy_static! {
static ref QUEUE_FRAMES: Mutex<Vec<FrameTracker>> = Mutex::new(Vec::new()); static ref QUEUE_FRAMES: UPSafeCell<Vec<FrameTracker>> = unsafe {
UPSafeCell::new(Vec::new())
};
} }
impl BlockDevice for VirtIOBlock { impl BlockDevice for VirtIOBlock {
fn read_block(&self, block_id: usize, buf: &mut [u8]) { fn read_block(&self, block_id: usize, buf: &mut [u8]) {
self.0.lock().read_block(block_id, buf).expect("Error when reading VirtIOBlk"); self.0.exclusive_access()
.read_block(block_id, buf)
.expect("Error when reading VirtIOBlk");
} }
fn write_block(&self, block_id: usize, buf: &[u8]) { fn write_block(&self, block_id: usize, buf: &[u8]) {
self.0.lock().write_block(block_id, buf).expect("Error when writing VirtIOBlk"); self.0.exclusive_access()
.write_block(block_id, buf)
.expect("Error when writing VirtIOBlk");
} }
} }
impl VirtIOBlock { impl VirtIOBlock {
#[allow(unused)] #[allow(unused)]
pub fn new() -> Self { pub fn new() -> Self {
Self(Mutex::new(VirtIOBlk::new( unsafe {
unsafe { &mut *(VIRTIO0 as *mut VirtIOHeader) } Self(UPSafeCell::new(VirtIOBlk::new(
).unwrap())) &mut *(VIRTIO0 as *mut VirtIOHeader)
).unwrap()))
}
} }
} }
@ -50,7 +58,7 @@ pub extern "C" fn virtio_dma_alloc(pages: usize) -> PhysAddr {
let frame = frame_alloc().unwrap(); let frame = frame_alloc().unwrap();
if i == 0 { ppn_base = frame.ppn; } if i == 0 { ppn_base = frame.ppn; }
assert_eq!(frame.ppn.0, ppn_base.0 + i); assert_eq!(frame.ppn.0, ppn_base.0 + i);
QUEUE_FRAMES.lock().push(frame); QUEUE_FRAMES.exclusive_access().push(frame);
} }
ppn_base.into() ppn_base.into()
} }


@ -3,18 +3,18 @@ use easy_fs::{
Inode, Inode,
}; };
use crate::drivers::BLOCK_DEVICE; use crate::drivers::BLOCK_DEVICE;
use crate::sync::UPSafeCell;
use alloc::sync::Arc; use alloc::sync::Arc;
use lazy_static::*; use lazy_static::*;
use bitflags::*; use bitflags::*;
use alloc::vec::Vec; use alloc::vec::Vec;
use spin::Mutex;
use super::File; use super::File;
use crate::mm::UserBuffer; use crate::mm::UserBuffer;
pub struct OSInode { pub struct OSInode {
readable: bool, readable: bool,
writable: bool, writable: bool,
inner: Mutex<OSInodeInner>, inner: UPSafeCell<OSInodeInner>,
} }
pub struct OSInodeInner { pub struct OSInodeInner {
@ -31,14 +31,14 @@ impl OSInode {
Self { Self {
readable, readable,
writable, writable,
inner: Mutex::new(OSInodeInner { inner: unsafe { UPSafeCell::new(OSInodeInner {
offset: 0, offset: 0,
inode, inode,
}), })},
} }
} }
pub fn read_all(&self) -> Vec<u8> { pub fn read_all(&self) -> Vec<u8> {
let mut inner = self.inner.lock(); let mut inner = self.inner.exclusive_access();
let mut buffer = [0u8; 512]; let mut buffer = [0u8; 512];
let mut v: Vec<u8> = Vec::new(); let mut v: Vec<u8> = Vec::new();
loop { loop {
@ -133,7 +133,7 @@ impl File for OSInode {
fn readable(&self) -> bool { self.readable } fn readable(&self) -> bool { self.readable }
fn writable(&self) -> bool { self.writable } fn writable(&self) -> bool { self.writable }
fn read(&self, mut buf: UserBuffer) -> usize { fn read(&self, mut buf: UserBuffer) -> usize {
let mut inner = self.inner.lock(); let mut inner = self.inner.exclusive_access();
let mut total_read_size = 0usize; let mut total_read_size = 0usize;
for slice in buf.buffers.iter_mut() { for slice in buf.buffers.iter_mut() {
let read_size = inner.inode.read_at(inner.offset, *slice); let read_size = inner.inode.read_at(inner.offset, *slice);
@ -146,7 +146,7 @@ impl File for OSInode {
total_read_size total_read_size
} }
fn write(&self, buf: UserBuffer) -> usize { fn write(&self, buf: UserBuffer) -> usize {
let mut inner = self.inner.lock(); let mut inner = self.inner.exclusive_access();
let mut total_write_size = 0usize; let mut total_write_size = 0usize;
for slice in buf.buffers.iter() { for slice in buf.buffers.iter() {
let write_size = inner.inode.write_at(inner.offset, *slice); let write_size = inner.inode.write_at(inner.offset, *slice);


@ -1,26 +1,25 @@
use super::File; use super::File;
use alloc::sync::{Arc, Weak}; use alloc::sync::{Arc, Weak};
use spin::Mutex; use crate::sync::UPSafeCell;
use crate::mm::{ use crate::mm::UserBuffer;
UserBuffer,
};
use crate::task::suspend_current_and_run_next; use crate::task::suspend_current_and_run_next;
pub struct Pipe { pub struct Pipe {
readable: bool, readable: bool,
writable: bool, writable: bool,
buffer: Arc<Mutex<PipeRingBuffer>>, buffer: Arc<UPSafeCell<PipeRingBuffer>>,
} }
impl Pipe { impl Pipe {
pub fn read_end_with_buffer(buffer: Arc<Mutex<PipeRingBuffer>>) -> Self { pub fn read_end_with_buffer(buffer: Arc<UPSafeCell<PipeRingBuffer>>) -> Self {
Self { Self {
readable: true, readable: true,
writable: false, writable: false,
buffer, buffer,
} }
} }
pub fn write_end_with_buffer(buffer: Arc<Mutex<PipeRingBuffer>>) -> Self { pub fn write_end_with_buffer(buffer: Arc<UPSafeCell<PipeRingBuffer>>) -> Self {
Self { Self {
readable: false, readable: false,
writable: true, writable: true,
@ -101,14 +100,16 @@ impl PipeRingBuffer {
/// Return (read_end, write_end) /// Return (read_end, write_end)
pub fn make_pipe() -> (Arc<Pipe>, Arc<Pipe>) { pub fn make_pipe() -> (Arc<Pipe>, Arc<Pipe>) {
let buffer = Arc::new(Mutex::new(PipeRingBuffer::new())); let buffer = Arc::new(unsafe {
UPSafeCell::new(PipeRingBuffer::new())
});
let read_end = Arc::new( let read_end = Arc::new(
Pipe::read_end_with_buffer(buffer.clone()) Pipe::read_end_with_buffer(buffer.clone())
); );
let write_end = Arc::new( let write_end = Arc::new(
Pipe::write_end_with_buffer(buffer.clone()) Pipe::write_end_with_buffer(buffer.clone())
); );
buffer.lock().set_write_end(&write_end); buffer.exclusive_access().set_write_end(&write_end);
(read_end, write_end) (read_end, write_end)
} }
@ -120,7 +121,7 @@ impl File for Pipe {
let mut buf_iter = buf.into_iter(); let mut buf_iter = buf.into_iter();
let mut read_size = 0usize; let mut read_size = 0usize;
loop { loop {
let mut ring_buffer = self.buffer.lock(); let mut ring_buffer = self.buffer.exclusive_access();
let loop_read = ring_buffer.available_read(); let loop_read = ring_buffer.available_read();
if loop_read == 0 { if loop_read == 0 {
if ring_buffer.all_write_ends_closed() { if ring_buffer.all_write_ends_closed() {
@ -146,7 +147,7 @@ impl File for Pipe {
let mut buf_iter = buf.into_iter(); let mut buf_iter = buf.into_iter();
let mut write_size = 0usize; let mut write_size = 0usize;
loop { loop {
let mut ring_buffer = self.buffer.lock(); let mut ring_buffer = self.buffer.exclusive_access();
let loop_write = ring_buffer.available_write(); let loop_write = ring_buffer.available_write();
if loop_write == 0 { if loop_write == 0 {
drop(ring_buffer); drop(ring_buffer);


@ -1,9 +1,8 @@
#![no_std] #![no_std]
#![no_main] #![no_main]
#![feature(global_asm)] #![feature(global_asm)]
#![feature(llvm_asm)] #![feature(asm)]
#![feature(panic_info_message)] #![feature(panic_info_message)]
#![feature(const_in_array_repeat_expressions)]
#![feature(alloc_error_handler)] #![feature(alloc_error_handler)]
extern crate alloc; extern crate alloc;
@ -20,6 +19,7 @@ mod trap;
mod config; mod config;
mod task; mod task;
mod timer; mod timer;
mod sync;
mod mm; mod mm;
mod fs; mod fs;
mod drivers; mod drivers;
@ -31,9 +31,12 @@ fn clear_bss() {
fn sbss(); fn sbss();
fn ebss(); fn ebss();
} }
(sbss as usize..ebss as usize).for_each(|a| { unsafe {
unsafe { (a as *mut u8).write_volatile(0) } core::slice::from_raw_parts_mut(
}); sbss as usize as *mut u8,
ebss as usize - sbss as usize,
).fill(0);
}
} }
#[no_mangle] #[no_mangle]


@ -1,6 +1,6 @@
use super::{PhysAddr, PhysPageNum}; use super::{PhysAddr, PhysPageNum};
use alloc::vec::Vec; use alloc::vec::Vec;
use spin::Mutex; use crate::sync::UPSafeCell;
use crate::config::MEMORY_END; use crate::config::MEMORY_END;
use lazy_static::*; use lazy_static::*;
use core::fmt::{self, Debug, Formatter}; use core::fmt::{self, Debug, Formatter};
@ -88,8 +88,9 @@ impl FrameAllocator for StackFrameAllocator {
type FrameAllocatorImpl = StackFrameAllocator; type FrameAllocatorImpl = StackFrameAllocator;
lazy_static! { lazy_static! {
pub static ref FRAME_ALLOCATOR: Mutex<FrameAllocatorImpl> = pub static ref FRAME_ALLOCATOR: UPSafeCell<FrameAllocatorImpl> = unsafe {
Mutex::new(FrameAllocatorImpl::new()); UPSafeCell::new(FrameAllocatorImpl::new())
};
} }
pub fn init_frame_allocator() { pub fn init_frame_allocator() {
@ -97,20 +98,20 @@ pub fn init_frame_allocator() {
fn ekernel(); fn ekernel();
} }
FRAME_ALLOCATOR FRAME_ALLOCATOR
.lock() .exclusive_access()
.init(PhysAddr::from(ekernel as usize).ceil(), PhysAddr::from(MEMORY_END).floor()); .init(PhysAddr::from(ekernel as usize).ceil(), PhysAddr::from(MEMORY_END).floor());
} }
pub fn frame_alloc() -> Option<FrameTracker> { pub fn frame_alloc() -> Option<FrameTracker> {
FRAME_ALLOCATOR FRAME_ALLOCATOR
.lock() .exclusive_access()
.alloc() .alloc()
.map(|ppn| FrameTracker::new(ppn)) .map(|ppn| FrameTracker::new(ppn))
} }
pub fn frame_dealloc(ppn: PhysPageNum) { pub fn frame_dealloc(ppn: PhysPageNum) {
FRAME_ALLOCATOR FRAME_ALLOCATOR
.lock() .exclusive_access()
.dealloc(ppn); .dealloc(ppn);
} }


@ -7,7 +7,7 @@ use alloc::vec::Vec;
use riscv::register::satp; use riscv::register::satp;
use alloc::sync::Arc; use alloc::sync::Arc;
use lazy_static::*; use lazy_static::*;
use spin::Mutex; use crate::sync::UPSafeCell;
use crate::config::{ use crate::config::{
MEMORY_END, MEMORY_END,
PAGE_SIZE, PAGE_SIZE,
@ -31,13 +31,13 @@ extern "C" {
} }
lazy_static! { lazy_static! {
pub static ref KERNEL_SPACE: Arc<Mutex<MemorySet>> = Arc::new(Mutex::new( pub static ref KERNEL_SPACE: Arc<UPSafeCell<MemorySet>> = Arc::new(unsafe {
MemorySet::new_kernel() UPSafeCell::new(MemorySet::new_kernel())
)); });
} }
pub fn kernel_token() -> usize { pub fn kernel_token() -> usize {
KERNEL_SPACE.lock().token() KERNEL_SPACE.exclusive_access().token()
} }
pub struct MemorySet { pub struct MemorySet {
@ -220,7 +220,7 @@ impl MemorySet {
let satp = self.page_table.token(); let satp = self.page_table.token();
unsafe { unsafe {
satp::write(satp); satp::write(satp);
llvm_asm!("sfence.vma" :::: "volatile"); asm!("sfence.vma");
} }
} }
pub fn translate(&self, vpn: VirtPageNum) -> Option<PageTableEntry> { pub fn translate(&self, vpn: VirtPageNum) -> Option<PageTableEntry> {
@ -338,7 +338,7 @@ bitflags! {
#[allow(unused)] #[allow(unused)]
pub fn remap_test() { pub fn remap_test() {
let mut kernel_space = KERNEL_SPACE.lock(); let mut kernel_space = KERNEL_SPACE.exclusive_access();
let mid_text: VirtAddr = ((stext as usize + etext as usize) / 2).into(); let mid_text: VirtAddr = ((stext as usize + etext as usize) / 2).into();
let mid_rodata: VirtAddr = ((srodata as usize + erodata as usize) / 2).into(); let mid_rodata: VirtAddr = ((srodata as usize + erodata as usize) / 2).into();
let mid_data: VirtAddr = ((sdata as usize + edata as usize) / 2).into(); let mid_data: VirtAddr = ((sdata as usize + edata as usize) / 2).into();


@ -24,5 +24,5 @@ pub use memory_set::remap_test;
pub fn init() { pub fn init() {
heap_allocator::init_heap(); heap_allocator::init_heap();
frame_allocator::init_frame_allocator(); frame_allocator::init_frame_allocator();
KERNEL_SPACE.lock().activate(); KERNEL_SPACE.exclusive_access().activate();
} }


@ -14,11 +14,12 @@ const SBI_SHUTDOWN: usize = 8;
fn sbi_call(which: usize, arg0: usize, arg1: usize, arg2: usize) -> usize { fn sbi_call(which: usize, arg0: usize, arg1: usize, arg2: usize) -> usize {
let mut ret; let mut ret;
unsafe { unsafe {
llvm_asm!("ecall" asm!(
: "={x10}" (ret) "ecall",
: "{x10}" (arg0), "{x11}" (arg1), "{x12}" (arg2), "{x17}" (which) inlateout("x10") arg0 => ret,
: "memory" in("x11") arg1,
: "volatile" in("x12") arg2,
in("x17") which,
); );
} }
ret ret

os/src/sync/mod.rs (new file)

@ -0,0 +1,3 @@
mod up;
pub use up::UPSafeCell;

os/src/sync/up.rs (new file)

@ -0,0 +1,27 @@
use core::cell::{RefCell, RefMut};
/// Wrap a static data structure inside it so that we are
/// able to access it without any `unsafe`.
///
/// We should only use it in uniprocessor.
///
/// In order to get mutable reference of inner data, call
/// `exclusive_access`.
pub struct UPSafeCell<T> {
/// inner data
inner: RefCell<T>,
}
unsafe impl<T> Sync for UPSafeCell<T> {}
impl<T> UPSafeCell<T> {
/// User is responsible to guarantee that inner struct is only used in
/// uniprocessor.
pub unsafe fn new(value: T) -> Self {
Self { inner: RefCell::new(value) }
}
/// Panic if the data has been borrowed.
pub fn exclusive_access(&self) -> RefMut<'_, T> {
self.inner.borrow_mut()
}
}
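
A minimal usage sketch for the new wrapper (the `COUNTER` global and `bump` function are hypothetical, not part of this commit): wrap the global in `lazy_static!` with the `unsafe` constructor, then take the `RefMut` through `exclusive_access` and let it drop before anyone else needs the data, otherwise the inner `RefCell` panics on the second borrow.

use lazy_static::lazy_static;

lazy_static! {
    // `new` is unsafe: the caller promises the data is only touched on one hart.
    static ref COUNTER: UPSafeCell<usize> = unsafe { UPSafeCell::new(0) };
}

fn bump() -> usize {
    let mut value = COUNTER.exclusive_access(); // RefMut<'_, usize>
    *value += 1;
    *value
    // `value` is dropped here; calling `exclusive_access` again while it
    // is still alive would panic, which is exactly the misuse it catches.
}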


@ -11,7 +11,7 @@ use alloc::sync::Arc;
pub fn sys_write(fd: usize, buf: *const u8, len: usize) -> isize { pub fn sys_write(fd: usize, buf: *const u8, len: usize) -> isize {
let token = current_user_token(); let token = current_user_token();
let task = current_task().unwrap(); let task = current_task().unwrap();
let inner = task.acquire_inner_lock(); let inner = task.inner_exclusive_access();
if fd >= inner.fd_table.len() { if fd >= inner.fd_table.len() {
return -1; return -1;
} }
@ -20,7 +20,7 @@ pub fn sys_write(fd: usize, buf: *const u8, len: usize) -> isize {
return -1; return -1;
} }
let file = file.clone(); let file = file.clone();
// release Task lock manually to avoid deadlock // release current task TCB manually to avoid multi-borrow
drop(inner); drop(inner);
file.write( file.write(
UserBuffer::new(translated_byte_buffer(token, buf, len)) UserBuffer::new(translated_byte_buffer(token, buf, len))
@ -33,7 +33,7 @@ pub fn sys_write(fd: usize, buf: *const u8, len: usize) -> isize {
pub fn sys_read(fd: usize, buf: *const u8, len: usize) -> isize { pub fn sys_read(fd: usize, buf: *const u8, len: usize) -> isize {
let token = current_user_token(); let token = current_user_token();
let task = current_task().unwrap(); let task = current_task().unwrap();
let inner = task.acquire_inner_lock(); let inner = task.inner_exclusive_access();
if fd >= inner.fd_table.len() { if fd >= inner.fd_table.len() {
return -1; return -1;
} }
@ -42,7 +42,7 @@ pub fn sys_read(fd: usize, buf: *const u8, len: usize) -> isize {
if !file.readable() { if !file.readable() {
return -1; return -1;
} }
// release Task lock manually to avoid deadlock // release current task TCB manually to avoid multi-borrow
drop(inner); drop(inner);
file.read( file.read(
UserBuffer::new(translated_byte_buffer(token, buf, len)) UserBuffer::new(translated_byte_buffer(token, buf, len))
@ -60,7 +60,7 @@ pub fn sys_open(path: *const u8, flags: u32) -> isize {
path.as_str(), path.as_str(),
OpenFlags::from_bits(flags).unwrap() OpenFlags::from_bits(flags).unwrap()
) { ) {
let mut inner = task.acquire_inner_lock(); let mut inner = task.inner_exclusive_access();
let fd = inner.alloc_fd(); let fd = inner.alloc_fd();
inner.fd_table[fd] = Some(inode); inner.fd_table[fd] = Some(inode);
fd as isize fd as isize
@ -71,7 +71,7 @@ pub fn sys_open(path: *const u8, flags: u32) -> isize {
pub fn sys_close(fd: usize) -> isize { pub fn sys_close(fd: usize) -> isize {
let task = current_task().unwrap(); let task = current_task().unwrap();
let mut inner = task.acquire_inner_lock(); let mut inner = task.inner_exclusive_access();
if fd >= inner.fd_table.len() { if fd >= inner.fd_table.len() {
return -1; return -1;
} }
@ -85,7 +85,7 @@ pub fn sys_close(fd: usize) -> isize {
pub fn sys_pipe(pipe: *mut usize) -> isize { pub fn sys_pipe(pipe: *mut usize) -> isize {
let task = current_task().unwrap(); let task = current_task().unwrap();
let token = current_user_token(); let token = current_user_token();
let mut inner = task.acquire_inner_lock(); let mut inner = task.inner_exclusive_access();
let (pipe_read, pipe_write) = make_pipe(); let (pipe_read, pipe_write) = make_pipe();
let read_fd = inner.alloc_fd(); let read_fd = inner.alloc_fd();
inner.fd_table[read_fd] = Some(pipe_read); inner.fd_table[read_fd] = Some(pipe_read);
@ -98,7 +98,7 @@ pub fn sys_pipe(pipe: *mut usize) -> isize {
pub fn sys_dup(fd: usize) -> isize { pub fn sys_dup(fd: usize) -> isize {
let task = current_task().unwrap(); let task = current_task().unwrap();
let mut inner = task.acquire_inner_lock(); let mut inner = task.inner_exclusive_access();
if fd >= inner.fd_table.len() { if fd >= inner.fd_table.len() {
return -1; return -1;
} }


@ -42,7 +42,7 @@ pub fn sys_fork() -> isize {
let new_task = current_task.fork(); let new_task = current_task.fork();
let new_pid = new_task.pid.0; let new_pid = new_task.pid.0;
// modify trap context of new_task, because it returns immediately after switching // modify trap context of new_task, because it returns immediately after switching
let trap_cx = new_task.acquire_inner_lock().get_trap_cx(); let trap_cx = new_task.inner_exclusive_access().get_trap_cx();
// we do not have to move to next instruction since we have done it before // we do not have to move to next instruction since we have done it before
// for child process, fork returns 0 // for child process, fork returns 0
trap_cx.x[10] = 0; trap_cx.x[10] = 0;
@ -81,35 +81,35 @@ pub fn sys_waitpid(pid: isize, exit_code_ptr: *mut i32) -> isize {
let task = current_task().unwrap(); let task = current_task().unwrap();
// find a child process // find a child process
// ---- hold current PCB lock // ---- access current PCB exclusively
let mut inner = task.acquire_inner_lock(); let mut inner = task.inner_exclusive_access();
if inner.children if inner.children
.iter() .iter()
.find(|p| {pid == -1 || pid as usize == p.getpid()}) .find(|p| {pid == -1 || pid as usize == p.getpid()})
.is_none() { .is_none() {
return -1; return -1;
// ---- release current PCB lock // ---- release current PCB
} }
let pair = inner.children let pair = inner.children
.iter() .iter()
.enumerate() .enumerate()
.find(|(_, p)| { .find(|(_, p)| {
// ++++ temporarily hold child PCB lock // ++++ temporarily access child PCB exclusively
p.acquire_inner_lock().is_zombie() && (pid == -1 || pid as usize == p.getpid()) p.inner_exclusive_access().is_zombie() && (pid == -1 || pid as usize == p.getpid())
// ++++ release child PCB lock // ++++ release child PCB
}); });
if let Some((idx, _)) = pair { if let Some((idx, _)) = pair {
let child = inner.children.remove(idx); let child = inner.children.remove(idx);
// confirm that child will be deallocated after being removed from children list // confirm that child will be deallocated after being removed from children list
assert_eq!(Arc::strong_count(&child), 1); assert_eq!(Arc::strong_count(&child), 1);
let found_pid = child.getpid(); let found_pid = child.getpid();
// ++++ temporarily hold child lock // ++++ temporarily access child PCB exclusively
let exit_code = child.acquire_inner_lock().exit_code; let exit_code = child.inner_exclusive_access().exit_code;
// ++++ release child PCB lock // ++++ release child PCB
*translated_refmut(inner.memory_set.token(), exit_code_ptr) = exit_code; *translated_refmut(inner.memory_set.token(), exit_code_ptr) = exit_code;
found_pid as isize found_pid as isize
} else { } else {
-2 -2
} }
// ---- release current PCB lock automatically // ---- release current PCB automatically
} }


@ -3,13 +3,22 @@ use crate::trap::trap_return;
#[repr(C)] #[repr(C)]
pub struct TaskContext { pub struct TaskContext {
ra: usize, ra: usize,
sp: usize,
s: [usize; 12], s: [usize; 12],
} }
impl TaskContext { impl TaskContext {
pub fn goto_trap_return() -> Self { pub fn zero_init() -> Self {
Self {
ra: 0,
sp: 0,
s: [0; 12],
}
}
pub fn goto_trap_return(kstack_ptr: usize) -> Self {
Self { Self {
ra: trap_return as usize, ra: trap_return as usize,
sp: kstack_ptr,
s: [0; 12], s: [0; 12],
} }
} }


@ -1,7 +1,7 @@
use crate::sync::UPSafeCell;
use super::TaskControlBlock; use super::TaskControlBlock;
use alloc::collections::VecDeque; use alloc::collections::VecDeque;
use alloc::sync::Arc; use alloc::sync::Arc;
use spin::Mutex;
use lazy_static::*; use lazy_static::*;
pub struct TaskManager { pub struct TaskManager {
@ -22,13 +22,15 @@ impl TaskManager {
} }
lazy_static! { lazy_static! {
pub static ref TASK_MANAGER: Mutex<TaskManager> = Mutex::new(TaskManager::new()); pub static ref TASK_MANAGER: UPSafeCell<TaskManager> = unsafe {
UPSafeCell::new(TaskManager::new())
};
} }
pub fn add_task(task: Arc<TaskControlBlock>) { pub fn add_task(task: Arc<TaskControlBlock>) {
TASK_MANAGER.lock().add(task); TASK_MANAGER.exclusive_access().add(task);
} }
pub fn fetch_task() -> Option<Arc<TaskControlBlock>> { pub fn fetch_task() -> Option<Arc<TaskControlBlock>> {
TASK_MANAGER.lock().fetch() TASK_MANAGER.exclusive_access().fetch()
} }


@ -28,51 +28,51 @@ pub fn suspend_current_and_run_next() {
// There must be an application running. // There must be an application running.
let task = take_current_task().unwrap(); let task = take_current_task().unwrap();
// ---- hold current PCB lock // ---- access current TCB exclusively
let mut task_inner = task.acquire_inner_lock(); let mut task_inner = task.inner_exclusive_access();
let task_cx_ptr2 = task_inner.get_task_cx_ptr2(); let task_cx_ptr = &mut task_inner.task_cx as *mut TaskContext;
// Change status to Ready // Change status to Ready
task_inner.task_status = TaskStatus::Ready; task_inner.task_status = TaskStatus::Ready;
drop(task_inner); drop(task_inner);
// ---- release current PCB lock // ---- release current PCB
// push back to ready queue. // push back to ready queue.
add_task(task); add_task(task);
// jump to scheduling cycle // jump to scheduling cycle
schedule(task_cx_ptr2); schedule(task_cx_ptr);
} }
pub fn exit_current_and_run_next(exit_code: i32) { pub fn exit_current_and_run_next(exit_code: i32) {
// take from Processor // take from Processor
let task = take_current_task().unwrap(); let task = take_current_task().unwrap();
// **** hold current PCB lock // **** access current TCB exclusively
let mut inner = task.acquire_inner_lock(); let mut inner = task.inner_exclusive_access();
// Change status to Zombie // Change status to Zombie
inner.task_status = TaskStatus::Zombie; inner.task_status = TaskStatus::Zombie;
// Record exit code // Record exit code
inner.exit_code = exit_code; inner.exit_code = exit_code;
// do not move to its parent but under initproc // do not move to its parent but under initproc
// ++++++ hold initproc PCB lock here // ++++++ access initproc TCB exclusively
{ {
let mut initproc_inner = INITPROC.acquire_inner_lock(); let mut initproc_inner = INITPROC.inner_exclusive_access();
for child in inner.children.iter() { for child in inner.children.iter() {
child.acquire_inner_lock().parent = Some(Arc::downgrade(&INITPROC)); child.inner_exclusive_access().parent = Some(Arc::downgrade(&INITPROC));
initproc_inner.children.push(child.clone()); initproc_inner.children.push(child.clone());
} }
} }
// ++++++ release parent PCB lock here // ++++++ release parent PCB
inner.children.clear(); inner.children.clear();
// deallocate user space // deallocate user space
inner.memory_set.recycle_data_pages(); inner.memory_set.recycle_data_pages();
drop(inner); drop(inner);
// **** release current PCB lock // **** release current PCB
// drop task manually to maintain rc correctly // drop task manually to maintain rc correctly
drop(task); drop(task);
// we do not have to save task context // we do not have to save task context
let _unused: usize = 0; let mut _unused = TaskContext::zero_init();
schedule(&_unused as *const _); schedule(&mut _unused as *mut _);
} }
lazy_static! { lazy_static! {


@ -1,6 +1,6 @@
use alloc::vec::Vec; use alloc::vec::Vec;
use lazy_static::*; use lazy_static::*;
use spin::Mutex; use crate::sync::UPSafeCell;
use crate::mm::{KERNEL_SPACE, MapPermission, VirtAddr}; use crate::mm::{KERNEL_SPACE, MapPermission, VirtAddr};
use crate::config::{ use crate::config::{
PAGE_SIZE, PAGE_SIZE,
@ -39,7 +39,9 @@ impl PidAllocator {
} }
lazy_static! { lazy_static! {
static ref PID_ALLOCATOR : Mutex<PidAllocator> = Mutex::new(PidAllocator::new()); static ref PID_ALLOCATOR : UPSafeCell<PidAllocator> = unsafe {
UPSafeCell::new(PidAllocator::new())
};
} }
pub struct PidHandle(pub usize); pub struct PidHandle(pub usize);
@ -47,12 +49,12 @@ pub struct PidHandle(pub usize);
impl Drop for PidHandle { impl Drop for PidHandle {
fn drop(&mut self) { fn drop(&mut self) {
//println!("drop pid {}", self.0); //println!("drop pid {}", self.0);
PID_ALLOCATOR.lock().dealloc(self.0); PID_ALLOCATOR.exclusive_access().dealloc(self.0);
} }
} }
pub fn pid_alloc() -> PidHandle { pub fn pid_alloc() -> PidHandle {
PID_ALLOCATOR.lock().alloc() PID_ALLOCATOR.exclusive_access().alloc()
} }
/// Return (bottom, top) of a kernel stack in kernel space. /// Return (bottom, top) of a kernel stack in kernel space.
@ -71,7 +73,7 @@ impl KernelStack {
let pid = pid_handle.0; let pid = pid_handle.0;
let (kernel_stack_bottom, kernel_stack_top) = kernel_stack_position(pid); let (kernel_stack_bottom, kernel_stack_top) = kernel_stack_position(pid);
KERNEL_SPACE KERNEL_SPACE
.lock() .exclusive_access()
.insert_framed_area( .insert_framed_area(
kernel_stack_bottom.into(), kernel_stack_bottom.into(),
kernel_stack_top.into(), kernel_stack_top.into(),
@ -81,6 +83,7 @@ impl KernelStack {
pid: pid_handle.0, pid: pid_handle.0,
} }
} }
#[allow(unused)]
pub fn push_on_top<T>(&self, value: T) -> *mut T where pub fn push_on_top<T>(&self, value: T) -> *mut T where
T: Sized, { T: Sized, {
let kernel_stack_top = self.get_top(); let kernel_stack_top = self.get_top();
@ -99,7 +102,7 @@ impl Drop for KernelStack {
let (kernel_stack_bottom, _) = kernel_stack_position(self.pid); let (kernel_stack_bottom, _) = kernel_stack_position(self.pid);
let kernel_stack_bottom_va: VirtAddr = kernel_stack_bottom.into(); let kernel_stack_bottom_va: VirtAddr = kernel_stack_bottom.into();
KERNEL_SPACE KERNEL_SPACE
.lock() .exclusive_access()
.remove_area_with_start_vpn(kernel_stack_bottom_va.into()); .remove_area_with_start_vpn(kernel_stack_bottom_va.into());
} }
} }


@ -1,95 +1,90 @@
use super::TaskControlBlock; use super::{TaskContext, TaskControlBlock};
use alloc::sync::Arc; use alloc::sync::Arc;
use core::cell::RefCell;
use lazy_static::*; use lazy_static::*;
use super::{fetch_task, TaskStatus}; use super::{fetch_task, TaskStatus};
use super::__switch; use super::__switch;
use crate::trap::TrapContext; use crate::trap::TrapContext;
use crate::sync::UPSafeCell;
pub struct Processor { pub struct Processor {
inner: RefCell<ProcessorInner>,
}
unsafe impl Sync for Processor {}
struct ProcessorInner {
current: Option<Arc<TaskControlBlock>>, current: Option<Arc<TaskControlBlock>>,
idle_task_cx_ptr: usize, idle_task_cx: TaskContext,
} }
impl Processor { impl Processor {
pub fn new() -> Self { pub fn new() -> Self {
Self { Self {
inner: RefCell::new(ProcessorInner { current: None,
current: None, idle_task_cx: TaskContext::zero_init(),
idle_task_cx_ptr: 0,
}),
} }
} }
fn get_idle_task_cx_ptr2(&self) -> *const usize { fn get_idle_task_cx_ptr(&mut self) -> *mut TaskContext {
let inner = self.inner.borrow(); &mut self.idle_task_cx as *mut _
&inner.idle_task_cx_ptr as *const usize
} }
pub fn run(&self) { pub fn take_current(&mut self) -> Option<Arc<TaskControlBlock>> {
loop { self.current.take()
if let Some(task) = fetch_task() {
let idle_task_cx_ptr2 = self.get_idle_task_cx_ptr2();
// acquire
let mut task_inner = task.acquire_inner_lock();
let next_task_cx_ptr2 = task_inner.get_task_cx_ptr2();
task_inner.task_status = TaskStatus::Running;
drop(task_inner);
// release
self.inner.borrow_mut().current = Some(task);
unsafe {
__switch(
idle_task_cx_ptr2,
next_task_cx_ptr2,
);
}
}
}
}
pub fn take_current(&self) -> Option<Arc<TaskControlBlock>> {
self.inner.borrow_mut().current.take()
} }
pub fn current(&self) -> Option<Arc<TaskControlBlock>> { pub fn current(&self) -> Option<Arc<TaskControlBlock>> {
self.inner.borrow().current.as_ref().map(|task| Arc::clone(task)) self.current.as_ref().map(|task| Arc::clone(task))
} }
} }
lazy_static! { lazy_static! {
pub static ref PROCESSOR: Processor = Processor::new(); pub static ref PROCESSOR: UPSafeCell<Processor> = unsafe {
UPSafeCell::new(Processor::new())
};
} }
pub fn run_tasks() { pub fn run_tasks() {
PROCESSOR.run(); loop {
let mut processor = PROCESSOR.exclusive_access();
if let Some(task) = fetch_task() {
let idle_task_cx_ptr = processor.get_idle_task_cx_ptr();
// access coming task TCB exclusively
let mut task_inner = task.inner_exclusive_access();
let next_task_cx_ptr = &task_inner.task_cx as *const TaskContext;
task_inner.task_status = TaskStatus::Running;
drop(task_inner);
// release coming task TCB manually
processor.current = Some(task);
// release processor manually
drop(processor);
unsafe {
__switch(
idle_task_cx_ptr,
next_task_cx_ptr,
);
}
}
}
} }
pub fn take_current_task() -> Option<Arc<TaskControlBlock>> { pub fn take_current_task() -> Option<Arc<TaskControlBlock>> {
PROCESSOR.take_current() PROCESSOR.exclusive_access().take_current()
} }
pub fn current_task() -> Option<Arc<TaskControlBlock>> { pub fn current_task() -> Option<Arc<TaskControlBlock>> {
PROCESSOR.current() PROCESSOR.exclusive_access().current()
} }
pub fn current_user_token() -> usize { pub fn current_user_token() -> usize {
let task = current_task().unwrap(); let task = current_task().unwrap();
let token = task.acquire_inner_lock().get_user_token(); let token = task.inner_exclusive_access().get_user_token();
token token
} }
pub fn current_trap_cx() -> &'static mut TrapContext { pub fn current_trap_cx() -> &'static mut TrapContext {
current_task().unwrap().acquire_inner_lock().get_trap_cx() current_task().unwrap().inner_exclusive_access().get_trap_cx()
} }
pub fn schedule(switched_task_cx_ptr2: *const usize) { pub fn schedule(switched_task_cx_ptr: *mut TaskContext) {
let idle_task_cx_ptr2 = PROCESSOR.get_idle_task_cx_ptr2(); let mut processor = PROCESSOR.exclusive_access();
let idle_task_cx_ptr = processor.get_idle_task_cx_ptr();
drop(processor);
unsafe { unsafe {
__switch( __switch(
switched_task_cx_ptr2, switched_task_cx_ptr,
idle_task_cx_ptr2, idle_task_cx_ptr,
); );
} }
} }


@ -1,37 +1,34 @@
.altmacro .altmacro
.macro SAVE_SN n .macro SAVE_SN n
sd s\n, (\n+1)*8(sp) sd s\n, (\n+2)*8(a0)
.endm .endm
.macro LOAD_SN n .macro LOAD_SN n
ld s\n, (\n+1)*8(sp) ld s\n, (\n+2)*8(a1)
.endm .endm
.section .text .section .text
.globl __switch .globl __switch
__switch: __switch:
# __switch( # __switch(
# current_task_cx_ptr2: &*const TaskContext, # current_task_cx_ptr: *mut TaskContext,
# next_task_cx_ptr2: &*const TaskContext # next_task_cx_ptr: *const TaskContext
# ) # )
# push TaskContext to current sp and save its address to where a0 points to # save kernel stack of current task
addi sp, sp, -13*8 sd sp, 8(a0)
sd sp, 0(a0) # save ra & s0~s11 of current execution
# fill TaskContext with ra & s0-s11 sd ra, 0(a0)
sd ra, 0(sp)
.set n, 0 .set n, 0
.rept 12 .rept 12
SAVE_SN %n SAVE_SN %n
.set n, n + 1 .set n, n + 1
.endr .endr
# ready for loading TaskContext a1 points to # restore ra & s0~s11 of next execution
ld sp, 0(a1) ld ra, 0(a1)
# load registers in the TaskContext
ld ra, 0(sp)
.set n, 0 .set n, 0
.rept 12 .rept 12
LOAD_SN %n LOAD_SN %n
.set n, n + 1 .set n, n + 1
.endr .endr
# pop TaskContext # restore kernel stack of next task
addi sp, sp, 13*8 ld sp, 8(a1)


@ -1,8 +1,10 @@
global_asm!(include_str!("switch.S")); global_asm!(include_str!("switch.S"));
use super::TaskContext;
extern "C" { extern "C" {
pub fn __switch( pub fn __switch(
current_task_cx_ptr2: *const usize, current_task_cx_ptr: *mut TaskContext,
next_task_cx_ptr2: *const usize next_task_cx_ptr: *const TaskContext
); );
} }


@ -6,14 +6,15 @@ use crate::mm::{
translated_refmut, translated_refmut,
}; };
use crate::trap::{TrapContext, trap_handler}; use crate::trap::{TrapContext, trap_handler};
use crate::config::{TRAP_CONTEXT}; use crate::config::TRAP_CONTEXT;
use crate::sync::UPSafeCell;
use core::cell::RefMut;
use super::TaskContext; use super::TaskContext;
use super::{PidHandle, pid_alloc, KernelStack}; use super::{PidHandle, pid_alloc, KernelStack};
use alloc::sync::{Weak, Arc}; use alloc::sync::{Weak, Arc};
use alloc::vec; use alloc::vec;
use alloc::vec::Vec; use alloc::vec::Vec;
use alloc::string::String; use alloc::string::String;
use spin::{Mutex, MutexGuard};
use crate::fs::{File, Stdin, Stdout}; use crate::fs::{File, Stdin, Stdout};
pub struct TaskControlBlock { pub struct TaskControlBlock {
@ -21,13 +22,13 @@ pub struct TaskControlBlock {
pub pid: PidHandle, pub pid: PidHandle,
pub kernel_stack: KernelStack, pub kernel_stack: KernelStack,
// mutable // mutable
inner: Mutex<TaskControlBlockInner>, inner: UPSafeCell<TaskControlBlockInner>,
} }
pub struct TaskControlBlockInner { pub struct TaskControlBlockInner {
pub trap_cx_ppn: PhysPageNum, pub trap_cx_ppn: PhysPageNum,
pub base_size: usize, pub base_size: usize,
pub task_cx_ptr: usize, pub task_cx: TaskContext,
pub task_status: TaskStatus, pub task_status: TaskStatus,
pub memory_set: MemorySet, pub memory_set: MemorySet,
pub parent: Option<Weak<TaskControlBlock>>, pub parent: Option<Weak<TaskControlBlock>>,
@ -37,9 +38,6 @@ pub struct TaskControlBlockInner {
} }
impl TaskControlBlockInner { impl TaskControlBlockInner {
pub fn get_task_cx_ptr2(&self) -> *const usize {
&self.task_cx_ptr as *const usize
}
pub fn get_trap_cx(&self) -> &'static mut TrapContext { pub fn get_trap_cx(&self) -> &'static mut TrapContext {
self.trap_cx_ppn.get_mut() self.trap_cx_ppn.get_mut()
} }
@ -64,8 +62,8 @@ impl TaskControlBlockInner {
} }
impl TaskControlBlock { impl TaskControlBlock {
pub fn acquire_inner_lock(&self) -> MutexGuard<TaskControlBlockInner> { pub fn inner_exclusive_access(&self) -> RefMut<'_, TaskControlBlockInner> {
self.inner.lock() self.inner.exclusive_access()
} }
pub fn new(elf_data: &[u8]) -> Self { pub fn new(elf_data: &[u8]) -> Self {
// memory_set with elf program headers/trampoline/trap context/user stack // memory_set with elf program headers/trampoline/trap context/user stack
@ -78,15 +76,13 @@ impl TaskControlBlock {
let pid_handle = pid_alloc(); let pid_handle = pid_alloc();
let kernel_stack = KernelStack::new(&pid_handle); let kernel_stack = KernelStack::new(&pid_handle);
let kernel_stack_top = kernel_stack.get_top(); let kernel_stack_top = kernel_stack.get_top();
// push a task context which goes to trap_return to the top of kernel stack
let task_cx_ptr = kernel_stack.push_on_top(TaskContext::goto_trap_return());
let task_control_block = Self { let task_control_block = Self {
pid: pid_handle, pid: pid_handle,
kernel_stack, kernel_stack,
inner: Mutex::new(TaskControlBlockInner { inner: unsafe { UPSafeCell::new(TaskControlBlockInner {
trap_cx_ppn, trap_cx_ppn,
base_size: user_sp, base_size: user_sp,
task_cx_ptr: task_cx_ptr as usize, task_cx: TaskContext::goto_trap_return(kernel_stack_top),
task_status: TaskStatus::Ready, task_status: TaskStatus::Ready,
memory_set, memory_set,
parent: None, parent: None,
@ -100,14 +96,14 @@ impl TaskControlBlock {
// 2 -> stderr // 2 -> stderr
Some(Arc::new(Stdout)), Some(Arc::new(Stdout)),
], ],
}), })},
}; };
// prepare TrapContext in user space // prepare TrapContext in user space
let trap_cx = task_control_block.acquire_inner_lock().get_trap_cx(); let trap_cx = task_control_block.inner_exclusive_access().get_trap_cx();
*trap_cx = TrapContext::app_init_context( *trap_cx = TrapContext::app_init_context(
entry_point, entry_point,
user_sp, user_sp,
KERNEL_SPACE.lock().token(), KERNEL_SPACE.exclusive_access().token(),
kernel_stack_top, kernel_stack_top,
trap_handler as usize, trap_handler as usize,
); );
@ -145,8 +141,8 @@ impl TaskControlBlock {
// make the user_sp aligned to 8B for k210 platform // make the user_sp aligned to 8B for k210 platform
user_sp -= user_sp % core::mem::size_of::<usize>(); user_sp -= user_sp % core::mem::size_of::<usize>();
// **** hold current PCB lock // **** access current TCB exclusively
let mut inner = self.acquire_inner_lock(); let mut inner = self.inner_exclusive_access();
// substitute memory_set // substitute memory_set
inner.memory_set = memory_set; inner.memory_set = memory_set;
// update trap_cx ppn // update trap_cx ppn
@ -155,18 +151,18 @@ impl TaskControlBlock {
let mut trap_cx = TrapContext::app_init_context( let mut trap_cx = TrapContext::app_init_context(
entry_point, entry_point,
user_sp, user_sp,
KERNEL_SPACE.lock().token(), KERNEL_SPACE.exclusive_access().token(),
self.kernel_stack.get_top(), self.kernel_stack.get_top(),
trap_handler as usize, trap_handler as usize,
); );
trap_cx.x[10] = args.len(); trap_cx.x[10] = args.len();
trap_cx.x[11] = argv_base; trap_cx.x[11] = argv_base;
*inner.get_trap_cx() = trap_cx; *inner.get_trap_cx() = trap_cx;
// **** release current PCB lock // **** release current PCB
} }
pub fn fork(self: &Arc<TaskControlBlock>) -> Arc<TaskControlBlock> { pub fn fork(self: &Arc<TaskControlBlock>) -> Arc<TaskControlBlock> {
// ---- hold parent PCB lock // ---- hold parent PCB lock
let mut parent_inner = self.acquire_inner_lock(); let mut parent_inner = self.inner_exclusive_access();
// copy user space(include trap context) // copy user space(include trap context)
let memory_set = MemorySet::from_existed_user( let memory_set = MemorySet::from_existed_user(
&parent_inner.memory_set &parent_inner.memory_set
@ -179,8 +175,6 @@ impl TaskControlBlock {
let pid_handle = pid_alloc(); let pid_handle = pid_alloc();
let kernel_stack = KernelStack::new(&pid_handle); let kernel_stack = KernelStack::new(&pid_handle);
let kernel_stack_top = kernel_stack.get_top(); let kernel_stack_top = kernel_stack.get_top();
// push a goto_trap_return task_cx on the top of kernel stack
let task_cx_ptr = kernel_stack.push_on_top(TaskContext::goto_trap_return());
// copy fd table // copy fd table
let mut new_fd_table: Vec<Option<Arc<dyn File + Send + Sync>>> = Vec::new(); let mut new_fd_table: Vec<Option<Arc<dyn File + Send + Sync>>> = Vec::new();
for fd in parent_inner.fd_table.iter() { for fd in parent_inner.fd_table.iter() {
@ -193,28 +187,28 @@ impl TaskControlBlock {
let task_control_block = Arc::new(TaskControlBlock { let task_control_block = Arc::new(TaskControlBlock {
pid: pid_handle, pid: pid_handle,
kernel_stack, kernel_stack,
inner: Mutex::new(TaskControlBlockInner { inner: unsafe { UPSafeCell::new(TaskControlBlockInner {
trap_cx_ppn, trap_cx_ppn,
base_size: parent_inner.base_size, base_size: parent_inner.base_size,
task_cx_ptr: task_cx_ptr as usize, task_cx: TaskContext::goto_trap_return(kernel_stack_top),
task_status: TaskStatus::Ready, task_status: TaskStatus::Ready,
memory_set, memory_set,
parent: Some(Arc::downgrade(self)), parent: Some(Arc::downgrade(self)),
children: Vec::new(), children: Vec::new(),
exit_code: 0, exit_code: 0,
fd_table: new_fd_table, fd_table: new_fd_table,
}), })},
}); });
// add child // add child
parent_inner.children.push(task_control_block.clone()); parent_inner.children.push(task_control_block.clone());
// modify kernel_sp in trap_cx // modify kernel_sp in trap_cx
// **** acquire child PCB lock // **** access child PCB exclusively
let trap_cx = task_control_block.acquire_inner_lock().get_trap_cx(); let trap_cx = task_control_block.inner_exclusive_access().get_trap_cx();
// **** release child PCB lock
trap_cx.kernel_sp = kernel_stack_top; trap_cx.kernel_sp = kernel_stack_top;
// return // return
task_control_block task_control_block
// ---- release parent PCB lock // **** release child PCB
// ---- release parent PCB
} }
pub fn getpid(&self) -> usize { pub fn getpid(&self) -> usize {
self.pid.0 self.pid.0


@ -103,10 +103,15 @@ pub fn trap_return() -> ! {
} }
let restore_va = __restore as usize - __alltraps as usize + TRAMPOLINE; let restore_va = __restore as usize - __alltraps as usize + TRAMPOLINE;
unsafe { unsafe {
llvm_asm!("fence.i" :::: "volatile"); asm!(
llvm_asm!("jr $0" :: "r"(restore_va), "{a0}"(trap_cx_ptr), "{a1}"(user_satp) :: "volatile"); "fence.i",
"jr {restore_va}",
restore_va = in(reg) restore_va,
in("a0") trap_cx_ptr,
in("a1") user_satp,
options(noreturn)
);
} }
panic!("Unreachable in back_to_user!");
} }
#[no_mangle] #[no_mangle]


@ -1 +1 @@
nightly-2021-01-30 nightly-2021-07-15


@ -14,7 +14,7 @@ fn fork_child(cur: &str, branch: char) {
if l >= DEPTH { if l >= DEPTH {
return; return;
} }
&mut next[..l].copy_from_slice(cur.as_bytes()); next[..l].copy_from_slice(cur.as_bytes());
next[l] = branch as u8; next[l] = branch as u8;
if fork() == 0 { if fork() == 0 {
fork_tree(core::str::from_utf8(&next[..l + 1]).unwrap()); fork_tree(core::str::from_utf8(&next[..l + 1]).unwrap());


@ -0,0 +1,36 @@
#![no_std]
#![no_main]
#[macro_use]
extern crate user_lib;
use user_lib::{
OpenFlags,
open,
close,
write,
get_time,
};
#[no_mangle]
pub fn main() -> i32 {
let mut buffer = [0u8; 1024]; // 1KiB
for i in 0..buffer.len() {
buffer[i] = i as u8;
}
let f = open("testf", OpenFlags::CREATE | OpenFlags::WRONLY);
if f < 0 {
panic!("Open test file failed!");
}
let f = f as usize;
let start = get_time();
let size_mb = 5usize;
for _ in 0..1024*size_mb {
write(f, &buffer);
}
close(f);
let time_ms = (get_time() - start) as usize;
let speed_kbs = size_mb * 1000000 / time_ms;
println!("time cost = {}ms, write speed = {}KiB/s", time_ms, speed_kbs);
0
}


@ -1,5 +1,5 @@
#![no_std] #![no_std]
#![feature(llvm_asm)] #![feature(asm)]
#![feature(linkage)] #![feature(linkage)]
#![feature(panic_info_message)] #![feature(panic_info_message)]
#![feature(alloc_error_handler)] #![feature(alloc_error_handler)]


@ -15,11 +15,12 @@ const SYSCALL_WAITPID: usize = 260;
fn syscall(id: usize, args: [usize; 3]) -> isize { fn syscall(id: usize, args: [usize; 3]) -> isize {
let mut ret: isize; let mut ret: isize;
unsafe { unsafe {
llvm_asm!("ecall" asm!(
: "={x10}" (ret) "ecall",
: "{x10}" (args[0]), "{x11}" (args[1]), "{x12}" (args[2]), "{x17}" (id) inlateout("x10") args[0] => ret,
: "memory" in("x11") args[1],
: "volatile" in("x12") args[2],
in("x17") id
); );
} }
ret ret
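
With the `asm!` rewrite, the three arguments travel in x10-x12, the syscall id in x17, and the return value comes back in x10. A hedged example of a wrapper built on top of `syscall` (the real user library defines its own; the constant value 64 for write is the usual rCore/RISC-V number and is assumed here, not shown in this diff):

const SYSCALL_WRITE: usize = 64; // assumed value, not visible in this hunk

pub fn sys_write(fd: usize, buffer: &[u8]) -> isize {
    syscall(SYSCALL_WRITE, [fd, buffer.as_ptr() as usize, buffer.len()])
}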