Merge recent updates from ch8

Yifan Wu 2022-01-26 01:53:39 -08:00
commit e1ae9b6236
99 changed files with 1959 additions and 1437 deletions


@ -36,9 +36,9 @@ build: env switch-check $(KERNEL_BIN) fs-img
switch-check:
ifeq ($(BOARD), qemu)
(which last-qemu) || (rm last-k210 -f && touch last-qemu && make clean)
(which last-qemu) || (rm -f last-k210 && touch last-qemu && make clean)
else ifeq ($(BOARD), k210)
(which last-k210) || (rm last-qemu -f && touch last-k210 && make clean)
(which last-k210) || (rm -f last-qemu && touch last-k210 && make clean)
endif
env:
@ -57,7 +57,7 @@ $(KERNEL_BIN): kernel
fs-img: $(APPS)
@cd ../user && make build
@rm $(FS_IMG) -f
@rm -f $(FS_IMG)
@cd ../easy-fs-fuse && cargo run --release -- -s ../user/src/bin/ -t ../user/target/riscv64gc-unknown-none-elf/release/
$(APPS):

os/src/boards/k210.rs (new file, 23 lines added)

@ -0,0 +1,23 @@
pub const CLOCK_FREQ: usize = 403000000 / 62;
pub const MMIO: &[(usize, usize)] = &[
// we don't need clint in S priv when running
// we only need claim/complete for target0 after initializing
(0x0C00_0000, 0x3000), /* PLIC */
(0x0C20_0000, 0x1000), /* PLIC */
(0x3800_0000, 0x1000), /* UARTHS */
(0x3800_1000, 0x1000), /* GPIOHS */
(0x5020_0000, 0x1000), /* GPIO */
(0x5024_0000, 0x1000), /* SPI_SLAVE */
(0x502B_0000, 0x1000), /* FPIOA */
(0x502D_0000, 0x1000), /* TIMER0 */
(0x502E_0000, 0x1000), /* TIMER1 */
(0x502F_0000, 0x1000), /* TIMER2 */
(0x5044_0000, 0x1000), /* SYSCTL */
(0x5200_0000, 0x1000), /* SPI0 */
(0x5300_0000, 0x1000), /* SPI1 */
(0x5400_0000, 0x1000), /* SPI2 */
];
pub type BlockDeviceImpl = crate::drivers::block::SDCardWrapper;

os/src/boards/qemu.rs (new file, 6 lines added)

@ -0,0 +1,6 @@
pub const CLOCK_FREQ: usize = 12500000;
pub const MMIO: &[(usize, usize)] = &[(0x10001000, 0x1000)];
pub type BlockDeviceImpl = crate::drivers::block::VirtIOBlock;


@ -10,33 +10,5 @@ pub const PAGE_SIZE_BITS: usize = 0xc;
pub const TRAMPOLINE: usize = usize::MAX - PAGE_SIZE + 1;
pub const TRAP_CONTEXT_BASE: usize = TRAMPOLINE - PAGE_SIZE;
#[cfg(feature = "board_k210")]
pub const CLOCK_FREQ: usize = 403000000 / 62;
pub use crate::board::{CLOCK_FREQ, MMIO};
#[cfg(feature = "board_qemu")]
pub const CLOCK_FREQ: usize = 12500000;
#[cfg(feature = "board_qemu")]
pub const MMIO: &[(usize, usize)] = &[
(0x10001000, 0x1000),
];
#[cfg(feature = "board_k210")]
pub const MMIO: &[(usize, usize)] = &[
// we don't need clint in S priv when running
// we only need claim/complete for target0 after initializing
(0x0C00_0000, 0x3000), /* PLIC */
(0x0C20_0000, 0x1000), /* PLIC */
(0x3800_0000, 0x1000), /* UARTHS */
(0x3800_1000, 0x1000), /* GPIOHS */
(0x5020_0000, 0x1000), /* GPIO */
(0x5024_0000, 0x1000), /* SPI_SLAVE */
(0x502B_0000, 0x1000), /* FPIOA */
(0x502D_0000, 0x1000), /* TIMER0 */
(0x502E_0000, 0x1000), /* TIMER1 */
(0x502F_0000, 0x1000), /* TIMER2 */
(0x5044_0000, 0x1000), /* SYSCTL */
(0x5200_0000, 0x1000), /* SPI0 */
(0x5300_0000, 0x1000), /* SPI1 */
(0x5400_0000, 0x1000), /* SPI2 */
];


@ -1,5 +1,5 @@
use core::fmt::{self, Write};
use crate::sbi::console_putchar;
use core::fmt::{self, Write};
struct Stdout;
@ -29,5 +29,3 @@ macro_rules! println {
$crate::console::print(format_args!(concat!($fmt, "\n") $(, $($arg)+)?))
}
}


@ -1,15 +1,13 @@
mod virtio_blk;
mod sdcard;
mod virtio_blk;
pub use virtio_blk::VirtIOBlock;
pub use sdcard::SDCardWrapper;
use lazy_static::*;
use alloc::sync::Arc;
use easy_fs::BlockDevice;
#[cfg(feature = "board_qemu")]
type BlockDeviceImpl = virtio_blk::VirtIOBlock;
#[cfg(feature = "board_k210")]
type BlockDeviceImpl = sdcard::SDCardWrapper;
use lazy_static::*;
use crate::board::BlockDeviceImpl;
lazy_static! {
pub static ref BLOCK_DEVICE: Arc<dyn BlockDevice> = Arc::new(BlockDeviceImpl::new());
@ -21,10 +19,12 @@ pub fn block_device_test() {
let mut write_buffer = [0u8; 512];
let mut read_buffer = [0u8; 512];
for i in 0..512 {
for byte in write_buffer.iter_mut() { *byte = i as u8; }
for byte in write_buffer.iter_mut() {
*byte = i as u8;
}
block_device.write_block(i as usize, &write_buffer);
block_device.read_block(i as usize, &mut read_buffer);
assert_eq!(write_buffer, read_buffer);
}
println!("block device test passed!");
}
}


@ -2,21 +2,21 @@
#![allow(non_camel_case_types)]
#![allow(unused)]
use k210_pac::{Peripherals, SPI0};
use super::BlockDevice;
use crate::sync::UPSafeCell;
use core::convert::TryInto;
use k210_hal::prelude::*;
use k210_pac::{Peripherals, SPI0};
use k210_soc::{
fpioa::{self, io},
//dmac::{dma_channel, DMAC, DMACExt},
gpio,
gpiohs,
spi::{aitm, frame_format, tmod, work_mode, SPI, SPIExt, SPIImpl},
fpioa::{self, io},
sysctl,
sleep::usleep,
spi::{aitm, frame_format, tmod, work_mode, SPIExt, SPIImpl, SPI},
sysctl,
};
use crate::sync::UPSafeCell;
use lazy_static::*;
use super::BlockDevice;
use core::convert::TryInto;
pub struct SDCard<SPI> {
spi: SPI,
@ -160,7 +160,11 @@ pub struct SDCardInfo {
}
impl</*'a,*/ X: SPI> SDCard</*'a,*/ X> {
pub fn new(spi: X, spi_cs: u32, cs_gpionum: u8/*, dmac: &'a DMAC, channel: dma_channel*/) -> Self {
pub fn new(
spi: X,
spi_cs: u32,
cs_gpionum: u8, /*, dmac: &'a DMAC, channel: dma_channel*/
) -> Self {
Self {
spi,
spi_cs,
@ -310,7 +314,7 @@ impl</*'a,*/ X: SPI> SDCard</*'a,*/ X> {
timeout -= 1;
}
/* After time out */
return 0xFF;
0xFF
}
/*
@ -337,7 +341,7 @@ impl</*'a,*/ X: SPI> SDCard</*'a,*/ X> {
self.read_data(response);
}
/* Return response */
return 0;
0
}
/*
@ -367,7 +371,7 @@ impl</*'a,*/ X: SPI> SDCard</*'a,*/ X> {
self.read_data(&mut csd_tab);
self.end_cmd();
/* see also: https://cdn-shop.adafruit.com/datasheets/TS16GUSDHC6.pdf */
return Ok(SDCardCSD {
Ok(SDCardCSD {
/* Byte 0 */
CSDStruct: (csd_tab[0] & 0xC0) >> 6,
SysSpecVersion: (csd_tab[0] & 0x3C) >> 2,
@ -419,8 +423,8 @@ impl</*'a,*/ X: SPI> SDCard</*'a,*/ X> {
/* Byte 15 */
CSD_CRC: (csd_tab[15] & 0xFE) >> 1,
Reserved4: 1,
/* Return the response */
});
/* Return the reponse */
})
}
/*
@ -449,7 +453,7 @@ impl</*'a,*/ X: SPI> SDCard</*'a,*/ X> {
/* Get CRC bytes (not really needed by us, but required by SD) */
self.read_data(&mut cid_tab);
self.end_cmd();
return Ok(SDCardCID {
Ok(SDCardCID {
/* Byte 0 */
ManufacturerID: cid_tab[0],
/* Byte 1, 2 */
@ -474,7 +478,7 @@ impl</*'a,*/ X: SPI> SDCard</*'a,*/ X> {
/* Byte 15 */
CID_CRC: (cid_tab[15] & 0xFE) >> 1,
Reserved2: 1,
});
})
}
/*
@ -606,7 +610,7 @@ impl</*'a,*/ X: SPI> SDCard</*'a,*/ X> {
}
let mut error = false;
//let mut dma_chunk = [0u32; SEC_LEN];
let mut tmp_chunk= [0u8; SEC_LEN];
let mut tmp_chunk = [0u8; SEC_LEN];
for chunk in data_buf.chunks_mut(SEC_LEN) {
if self.get_response() != SD_START_DATA_SINGLE_BLOCK_READ {
error = true;
@ -616,7 +620,7 @@ impl</*'a,*/ X: SPI> SDCard</*'a,*/ X> {
//self.read_data_dma(&mut dma_chunk);
self.read_data(&mut tmp_chunk);
/* Place the data received as u32 units from DMA into the u8 target buffer */
for (a, b) in chunk.iter_mut().zip(/*dma_chunk*/tmp_chunk.iter()) {
for (a, b) in chunk.iter_mut().zip(/*dma_chunk*/ tmp_chunk.iter()) {
//*a = (b & 0xff) as u8;
*a = *b;
}
@ -675,12 +679,12 @@ impl</*'a,*/ X: SPI> SDCard</*'a,*/ X> {
/* Send the data token to signify the start of the data */
self.write_data(&frame);
/* Write the block data to SD : write count data by block */
for (a, &b) in /*dma_chunk*/tmp_chunk.iter_mut().zip(chunk.iter()) {
for (a, &b) in /*dma_chunk*/ tmp_chunk.iter_mut().zip(chunk.iter()) {
//*a = b.into();
*a = b;
}
//self.write_data_dma(&mut dma_chunk);
self.write_data(&mut tmp_chunk);
self.write_data(&tmp_chunk);
/* Put dummy CRC bytes */
self.write_data(&[0xff, 0xff]);
/* Read data response */
@ -711,9 +715,8 @@ fn io_init() {
}
lazy_static! {
static ref PERIPHERALS: UPSafeCell<Peripherals> = unsafe {
UPSafeCell::new(Peripherals::take().unwrap())
};
static ref PERIPHERALS: UPSafeCell<Peripherals> =
unsafe { UPSafeCell::new(Peripherals::take().unwrap()) };
}
fn init_sdcard() -> SDCard<SPIImpl<SPI0>> {
@ -747,9 +750,15 @@ impl SDCardWrapper {
impl BlockDevice for SDCardWrapper {
fn read_block(&self, block_id: usize, buf: &mut [u8]) {
self.0.exclusive_access().read_sector(buf,block_id as u32).unwrap();
self.0
.exclusive_access()
.read_sector(buf, block_id as u32)
.unwrap();
}
fn write_block(&self, block_id: usize, buf: &[u8]) {
self.0.exclusive_access().write_sector(buf,block_id as u32).unwrap();
self.0
.exclusive_access()
.write_sector(buf, block_id as u32)
.unwrap();
}
}


@ -1,20 +1,12 @@
use virtio_drivers::{VirtIOBlk, VirtIOHeader};
use crate::mm::{
PhysAddr,
VirtAddr,
frame_alloc,
frame_dealloc,
PhysPageNum,
FrameTracker,
StepByOne,
PageTable,
kernel_token,
};
use super::BlockDevice;
use crate::mm::{
frame_alloc, frame_dealloc, kernel_token, FrameTracker, PageTable, PhysAddr, PhysPageNum,
StepByOne, VirtAddr,
};
use crate::sync::UPSafeCell;
use alloc::vec::Vec;
use lazy_static::*;
use virtio_drivers::{VirtIOBlk, VirtIOHeader};
#[allow(unused)]
const VIRTIO0: usize = 0x10001000;
@ -22,21 +14,21 @@ const VIRTIO0: usize = 0x10001000;
pub struct VirtIOBlock(UPSafeCell<VirtIOBlk<'static>>);
lazy_static! {
static ref QUEUE_FRAMES: UPSafeCell<Vec<FrameTracker>> = unsafe {
UPSafeCell::new(Vec::new())
};
static ref QUEUE_FRAMES: UPSafeCell<Vec<FrameTracker>> = unsafe { UPSafeCell::new(Vec::new()) };
}
impl BlockDevice for VirtIOBlock {
fn read_block(&self, block_id: usize, buf: &mut [u8]) {
self.0.exclusive_access()
.read_block(block_id, buf)
.expect("Error when reading VirtIOBlk");
self.0
.exclusive_access()
.read_block(block_id, buf)
.expect("Error when reading VirtIOBlk");
}
fn write_block(&self, block_id: usize, buf: &[u8]) {
self.0.exclusive_access()
.write_block(block_id, buf)
.expect("Error when writing VirtIOBlk");
self.0
.exclusive_access()
.write_block(block_id, buf)
.expect("Error when writing VirtIOBlk");
}
}
@ -44,9 +36,9 @@ impl VirtIOBlock {
#[allow(unused)]
pub fn new() -> Self {
unsafe {
Self(UPSafeCell::new(VirtIOBlk::new(
&mut *(VIRTIO0 as *mut VirtIOHeader)
).unwrap()))
Self(UPSafeCell::new(
VirtIOBlk::new(&mut *(VIRTIO0 as *mut VirtIOHeader)).unwrap(),
))
}
}
}
@ -56,7 +48,9 @@ pub extern "C" fn virtio_dma_alloc(pages: usize) -> PhysAddr {
let mut ppn_base = PhysPageNum(0);
for i in 0..pages {
let frame = frame_alloc().unwrap();
if i == 0 { ppn_base = frame.ppn; }
if i == 0 {
ppn_base = frame.ppn;
}
assert_eq!(frame.ppn.0, ppn_base.0 + i);
QUEUE_FRAMES.exclusive_access().push(frame);
}
@ -80,5 +74,7 @@ pub extern "C" fn virtio_phys_to_virt(paddr: PhysAddr) -> VirtAddr {
#[no_mangle]
pub extern "C" fn virtio_virt_to_phys(vaddr: VirtAddr) -> PhysAddr {
PageTable::from_token(kernel_token()).translate_va(vaddr).unwrap()
PageTable::from_token(kernel_token())
.translate_va(vaddr)
.unwrap()
}


@ -1,3 +1,3 @@
mod block;
pub mod block;
pub use block::BLOCK_DEVICE;
pub use block::BLOCK_DEVICE;


@ -1,15 +1,12 @@
use easy_fs::{
EasyFileSystem,
Inode,
};
use super::File;
use crate::drivers::BLOCK_DEVICE;
use crate::mm::UserBuffer;
use crate::sync::UPSafeCell;
use alloc::sync::Arc;
use lazy_static::*;
use bitflags::*;
use alloc::vec::Vec;
use super::File;
use crate::mm::UserBuffer;
use bitflags::*;
use easy_fs::{EasyFileSystem, Inode};
use lazy_static::*;
pub struct OSInode {
readable: bool,
@ -23,18 +20,11 @@ pub struct OSInodeInner {
}
impl OSInode {
pub fn new(
readable: bool,
writable: bool,
inode: Arc<Inode>,
) -> Self {
pub fn new(readable: bool, writable: bool, inode: Arc<Inode>) -> Self {
Self {
readable,
writable,
inner: unsafe { UPSafeCell::new(OSInodeInner {
offset: 0,
inode,
})},
inner: unsafe { UPSafeCell::new(OSInodeInner { offset: 0, inode }) },
}
}
pub fn read_all(&self) -> Vec<u8> {
@ -98,40 +88,30 @@ pub fn open_file(name: &str, flags: OpenFlags) -> Option<Arc<OSInode>> {
if let Some(inode) = ROOT_INODE.find(name) {
// clear size
inode.clear();
Some(Arc::new(OSInode::new(
readable,
writable,
inode,
)))
Some(Arc::new(OSInode::new(readable, writable, inode)))
} else {
// create file
ROOT_INODE.create(name)
.map(|inode| {
Arc::new(OSInode::new(
readable,
writable,
inode,
))
})
ROOT_INODE
.create(name)
.map(|inode| Arc::new(OSInode::new(readable, writable, inode)))
}
} else {
ROOT_INODE.find(name)
.map(|inode| {
if flags.contains(OpenFlags::TRUNC) {
inode.clear();
}
Arc::new(OSInode::new(
readable,
writable,
inode
))
})
ROOT_INODE.find(name).map(|inode| {
if flags.contains(OpenFlags::TRUNC) {
inode.clear();
}
Arc::new(OSInode::new(readable, writable, inode))
})
}
}
impl File for OSInode {
fn readable(&self) -> bool { self.readable }
fn writable(&self) -> bool { self.writable }
fn readable(&self) -> bool {
self.readable
}
fn writable(&self) -> bool {
self.writable
}
fn read(&self, mut buf: UserBuffer) -> usize {
let mut inner = self.inner.exclusive_access();
let mut total_read_size = 0usize;
@ -156,4 +136,4 @@ impl File for OSInode {
}
total_write_size
}
}
}


@ -1,16 +1,16 @@
mod inode;
mod pipe;
mod stdio;
mod inode;
use crate::mm::UserBuffer;
pub trait File : Send + Sync {
pub trait File: Send + Sync {
fn readable(&self) -> bool;
fn writable(&self) -> bool;
fn read(&self, buf: UserBuffer) -> usize;
fn write(&self, buf: UserBuffer) -> usize;
}
pub use pipe::{Pipe, make_pipe};
pub use inode::{list_apps, open_file, OSInode, OpenFlags};
pub use pipe::{make_pipe, Pipe};
pub use stdio::{Stdin, Stdout};
pub use inode::{OSInode, open_file, OpenFlags, list_apps};


@ -1,7 +1,7 @@
use super::File;
use alloc::sync::{Arc, Weak};
use crate::sync::UPSafeCell;
use crate::mm::UserBuffer;
use crate::sync::UPSafeCell;
use alloc::sync::{Arc, Weak};
use crate::task::suspend_current_and_run_next;
@ -32,9 +32,9 @@ const RING_BUFFER_SIZE: usize = 32;
#[derive(Copy, Clone, PartialEq)]
enum RingBufferStatus {
FULL,
EMPTY,
NORMAL,
Full,
Empty,
Normal,
}
pub struct PipeRingBuffer {
@ -51,7 +51,7 @@ impl PipeRingBuffer {
arr: [0; RING_BUFFER_SIZE],
head: 0,
tail: 0,
status: RingBufferStatus::EMPTY,
status: RingBufferStatus::Empty,
write_end: None,
}
}
@ -59,35 +59,33 @@ impl PipeRingBuffer {
self.write_end = Some(Arc::downgrade(write_end));
}
pub fn write_byte(&mut self, byte: u8) {
self.status = RingBufferStatus::NORMAL;
self.status = RingBufferStatus::Normal;
self.arr[self.tail] = byte;
self.tail = (self.tail + 1) % RING_BUFFER_SIZE;
if self.tail == self.head {
self.status = RingBufferStatus::FULL;
self.status = RingBufferStatus::Full;
}
}
pub fn read_byte(&mut self) -> u8 {
self.status = RingBufferStatus::NORMAL;
self.status = RingBufferStatus::Normal;
let c = self.arr[self.head];
self.head = (self.head + 1) % RING_BUFFER_SIZE;
if self.head == self.tail {
self.status = RingBufferStatus::EMPTY;
self.status = RingBufferStatus::Empty;
}
c
}
pub fn available_read(&self) -> usize {
if self.status == RingBufferStatus::EMPTY {
if self.status == RingBufferStatus::Empty {
0
} else if self.tail > self.head {
self.tail - self.head
} else {
if self.tail > self.head {
self.tail - self.head
} else {
self.tail + RING_BUFFER_SIZE - self.head
}
self.tail + RING_BUFFER_SIZE - self.head
}
}
pub fn available_write(&self) -> usize {
if self.status == RingBufferStatus::FULL {
if self.status == RingBufferStatus::Full {
0
} else {
RING_BUFFER_SIZE - self.available_read()
@ -100,24 +98,22 @@ impl PipeRingBuffer {
/// Return (read_end, write_end)
pub fn make_pipe() -> (Arc<Pipe>, Arc<Pipe>) {
let buffer = Arc::new(unsafe {
UPSafeCell::new(PipeRingBuffer::new())
});
let read_end = Arc::new(
Pipe::read_end_with_buffer(buffer.clone())
);
let write_end = Arc::new(
Pipe::write_end_with_buffer(buffer.clone())
);
let buffer = Arc::new(unsafe { UPSafeCell::new(PipeRingBuffer::new()) });
let read_end = Arc::new(Pipe::read_end_with_buffer(buffer.clone()));
let write_end = Arc::new(Pipe::write_end_with_buffer(buffer.clone()));
buffer.exclusive_access().set_write_end(&write_end);
(read_end, write_end)
}
impl File for Pipe {
fn readable(&self) -> bool { self.readable }
fn writable(&self) -> bool { self.writable }
fn readable(&self) -> bool {
self.readable
}
fn writable(&self) -> bool {
self.writable
}
fn read(&self, buf: UserBuffer) -> usize {
assert_eq!(self.readable(), true);
assert!(self.readable());
let mut buf_iter = buf.into_iter();
let mut read_size = 0usize;
loop {
@ -134,7 +130,9 @@ impl File for Pipe {
// read at most loop_read bytes
for _ in 0..loop_read {
if let Some(byte_ref) = buf_iter.next() {
unsafe { *byte_ref = ring_buffer.read_byte(); }
unsafe {
*byte_ref = ring_buffer.read_byte();
}
read_size += 1;
} else {
return read_size;
@ -143,7 +141,7 @@ impl File for Pipe {
}
}
fn write(&self, buf: UserBuffer) -> usize {
assert_eq!(self.writable(), true);
assert!(self.writable());
let mut buf_iter = buf.into_iter();
let mut write_size = 0usize;
loop {
@ -165,4 +163,4 @@ impl File for Pipe {
}
}
}
}
}


@ -1,5 +1,5 @@
use super::File;
use crate::mm::{UserBuffer};
use crate::mm::UserBuffer;
use crate::sbi::console_getchar;
use crate::task::suspend_current_and_run_next;
@ -8,8 +8,12 @@ pub struct Stdin;
pub struct Stdout;
impl File for Stdin {
fn readable(&self) -> bool { true }
fn writable(&self) -> bool { false }
fn readable(&self) -> bool {
true
}
fn writable(&self) -> bool {
false
}
fn read(&self, mut user_buf: UserBuffer) -> usize {
assert_eq!(user_buf.len(), 1);
// busy loop
@ -24,7 +28,9 @@ impl File for Stdin {
}
}
let ch = c as u8;
unsafe { user_buf.buffers[0].as_mut_ptr().write_volatile(ch); }
unsafe {
user_buf.buffers[0].as_mut_ptr().write_volatile(ch);
}
1
}
fn write(&self, _user_buf: UserBuffer) -> usize {
@ -33,9 +39,13 @@ impl File for Stdin {
}
impl File for Stdout {
fn readable(&self) -> bool { false }
fn writable(&self) -> bool { true }
fn read(&self, _user_buf: UserBuffer) -> usize{
fn readable(&self) -> bool {
false
}
fn writable(&self) -> bool {
true
}
fn read(&self, _user_buf: UserBuffer) -> usize {
panic!("Cannot read from stdout!");
}
fn write(&self, user_buf: UserBuffer) -> usize {
@ -44,4 +54,4 @@ impl File for Stdout {
}
user_buf.len()
}
}
}


@ -1,22 +1,23 @@
use core::panic::PanicInfo;
use core::arch::asm;
use crate::sbi::shutdown;
use crate::task::current_kstack_top;
use core::arch::asm;
use core::panic::PanicInfo;
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
match info.location() {
Some(location) => {
println!("[kernel] panicked at '{}', {}:{}:{}",
info.message().unwrap(),
location.file(),
location.line(),
location.column()
);
}
None => println!("[kernel] panicked at '{}'", info.message().unwrap())
if let Some(location) = info.location() {
println!(
"[kernel] Panicked at {}:{} {}",
location.file(),
location.line(),
info.message().unwrap()
);
} else {
println!("[kernel] Panicked: {}", info.message().unwrap());
}
unsafe {
backtrace();
}
unsafe { backtrace(); }
shutdown()
}
@ -26,9 +27,11 @@ unsafe fn backtrace() {
asm!("mv {}, s0", out(reg) fp);
println!("---START BACKTRACE---");
for i in 0..10 {
if fp == stop { break; }
println!("#{}:ra={:#x}", i, *((fp-8) as *const usize));
fp = *((fp-16) as *const usize);
if fp == stop {
break;
}
println!("#{}:ra={:#x}", i, *((fp - 8) as *const usize));
fp = *((fp - 16) as *const usize);
}
println!("---END BACKTRACE---");
}


@ -1,62 +0,0 @@
use alloc::vec::Vec;
use lazy_static::*;
pub fn get_num_app() -> usize {
extern "C" { fn _num_app(); }
unsafe { (_num_app as usize as *const usize).read_volatile() }
}
pub fn get_app_data(app_id: usize) -> &'static [u8] {
extern "C" { fn _num_app(); }
let num_app_ptr = _num_app as usize as *const usize;
let num_app = get_num_app();
let app_start = unsafe {
core::slice::from_raw_parts(num_app_ptr.add(1), num_app + 1)
};
assert!(app_id < num_app);
unsafe {
core::slice::from_raw_parts(
app_start[app_id] as *const u8,
app_start[app_id + 1] - app_start[app_id]
)
}
}
lazy_static! {
static ref APP_NAMES: Vec<&'static str> = {
let num_app = get_num_app();
extern "C" { fn _app_names(); }
let mut start = _app_names as usize as *const u8;
let mut v = Vec::new();
unsafe {
for _ in 0..num_app {
let mut end = start;
while end.read_volatile() != '\0' as u8 {
end = end.add(1);
}
let slice = core::slice::from_raw_parts(start, end as usize - start as usize);
let str = core::str::from_utf8(slice).unwrap();
v.push(str);
start = end.add(1);
}
}
v
};
}
#[allow(unused)]
pub fn get_app_data_by_name(name: &str) -> Option<&'static [u8]> {
let num_app = get_num_app();
(0..num_app)
.find(|&i| APP_NAMES[i] == name)
.map(|i| get_app_data(i))
}
pub fn list_apps() {
println!("/**** APPS ****");
for app in APP_NAMES.iter() {
println!("{}", app);
}
println!("**************/");
}


@ -8,21 +8,28 @@ extern crate alloc;
#[macro_use]
extern crate bitflags;
use core::arch::global_asm;
#[cfg(feature = "board_k210")]
#[path = "boards/k210.rs"]
mod board;
#[cfg(not(any(feature = "board_k210")))]
#[path = "boards/qemu.rs"]
mod board;
#[macro_use]
mod console;
mod lang_items;
mod sbi;
mod syscall;
mod trap;
mod config;
mod drivers;
mod fs;
mod lang_items;
mod mm;
mod sbi;
mod sync;
mod syscall;
mod task;
mod timer;
mod sync;
mod mm;
mod fs;
mod drivers;
mod trap;
use core::arch::global_asm;
global_asm!(include_str!("entry.asm"));
@ -32,10 +39,8 @@ fn clear_bss() {
fn ebss();
}
unsafe {
core::slice::from_raw_parts_mut(
sbss as usize as *mut u8,
ebss as usize - sbss as usize,
).fill(0);
core::slice::from_raw_parts_mut(sbss as usize as *mut u8, ebss as usize - sbss as usize)
.fill(0);
}
}


@ -1,5 +1,5 @@
use crate::config::{PAGE_SIZE, PAGE_SIZE_BITS};
use super::PageTableEntry;
use crate::config::{PAGE_SIZE, PAGE_SIZE_BITS};
use core::fmt::{self, Debug, Formatter};
const PA_WIDTH_SV39: usize = 56;
@ -52,35 +52,59 @@ impl Debug for PhysPageNum {
/// usize -> T: usize.into()
impl From<usize> for PhysAddr {
fn from(v: usize) -> Self { Self(v & ( (1 << PA_WIDTH_SV39) - 1 )) }
fn from(v: usize) -> Self {
Self(v & ((1 << PA_WIDTH_SV39) - 1))
}
}
impl From<usize> for PhysPageNum {
fn from(v: usize) -> Self { Self(v & ( (1 << PPN_WIDTH_SV39) - 1 )) }
fn from(v: usize) -> Self {
Self(v & ((1 << PPN_WIDTH_SV39) - 1))
}
}
impl From<usize> for VirtAddr {
fn from(v: usize) -> Self { Self(v & ( (1 << VA_WIDTH_SV39) - 1 )) }
fn from(v: usize) -> Self {
Self(v & ((1 << VA_WIDTH_SV39) - 1))
}
}
impl From<usize> for VirtPageNum {
fn from(v: usize) -> Self { Self(v & ( (1 << VPN_WIDTH_SV39) - 1 )) }
fn from(v: usize) -> Self {
Self(v & ((1 << VPN_WIDTH_SV39) - 1))
}
}
impl From<PhysAddr> for usize {
fn from(v: PhysAddr) -> Self { v.0 }
fn from(v: PhysAddr) -> Self {
v.0
}
}
impl From<PhysPageNum> for usize {
fn from(v: PhysPageNum) -> Self { v.0 }
fn from(v: PhysPageNum) -> Self {
v.0
}
}
impl From<VirtAddr> for usize {
fn from(v: VirtAddr) -> Self { v.0 }
fn from(v: VirtAddr) -> Self {
v.0
}
}
impl From<VirtPageNum> for usize {
fn from(v: VirtPageNum) -> Self { v.0 }
fn from(v: VirtPageNum) -> Self {
v.0
}
}
impl VirtAddr {
pub fn floor(&self) -> VirtPageNum { VirtPageNum(self.0 / PAGE_SIZE) }
pub fn ceil(&self) -> VirtPageNum { VirtPageNum((self.0 - 1 + PAGE_SIZE) / PAGE_SIZE) }
pub fn page_offset(&self) -> usize { self.0 & (PAGE_SIZE - 1) }
pub fn aligned(&self) -> bool { self.page_offset() == 0 }
pub fn floor(&self) -> VirtPageNum {
VirtPageNum(self.0 / PAGE_SIZE)
}
pub fn ceil(&self) -> VirtPageNum {
VirtPageNum((self.0 - 1 + PAGE_SIZE) / PAGE_SIZE)
}
pub fn page_offset(&self) -> usize {
self.0 & (PAGE_SIZE - 1)
}
pub fn aligned(&self) -> bool {
self.page_offset() == 0
}
}
impl From<VirtAddr> for VirtPageNum {
fn from(v: VirtAddr) -> Self {
@ -89,13 +113,23 @@ impl From<VirtAddr> for VirtPageNum {
}
}
impl From<VirtPageNum> for VirtAddr {
fn from(v: VirtPageNum) -> Self { Self(v.0 << PAGE_SIZE_BITS) }
fn from(v: VirtPageNum) -> Self {
Self(v.0 << PAGE_SIZE_BITS)
}
}
impl PhysAddr {
pub fn floor(&self) -> PhysPageNum { PhysPageNum(self.0 / PAGE_SIZE) }
pub fn ceil(&self) -> PhysPageNum { PhysPageNum((self.0 - 1 + PAGE_SIZE) / PAGE_SIZE) }
pub fn page_offset(&self) -> usize { self.0 & (PAGE_SIZE - 1) }
pub fn aligned(&self) -> bool { self.page_offset() == 0 }
pub fn floor(&self) -> PhysPageNum {
PhysPageNum(self.0 / PAGE_SIZE)
}
pub fn ceil(&self) -> PhysPageNum {
PhysPageNum((self.0 - 1 + PAGE_SIZE) / PAGE_SIZE)
}
pub fn page_offset(&self) -> usize {
self.0 & (PAGE_SIZE - 1)
}
pub fn aligned(&self) -> bool {
self.page_offset() == 0
}
}
impl From<PhysAddr> for PhysPageNum {
fn from(v: PhysAddr) -> Self {
@ -104,7 +138,9 @@ impl From<PhysAddr> for PhysPageNum {
}
}
impl From<PhysPageNum> for PhysAddr {
fn from(v: PhysPageNum) -> Self { Self(v.0 << PAGE_SIZE_BITS) }
fn from(v: PhysPageNum) -> Self {
Self(v.0 << PAGE_SIZE_BITS)
}
}
impl VirtPageNum {
@ -121,31 +157,23 @@ impl VirtPageNum {
impl PhysAddr {
pub fn get_ref<T>(&self) -> &'static T {
unsafe {
(self.0 as *const T).as_ref().unwrap()
}
unsafe { (self.0 as *const T).as_ref().unwrap() }
}
pub fn get_mut<T>(&self) -> &'static mut T {
unsafe {
(self.0 as *mut T).as_mut().unwrap()
}
unsafe { (self.0 as *mut T).as_mut().unwrap() }
}
}
impl PhysPageNum {
pub fn get_pte_array(&self) -> &'static mut [PageTableEntry] {
let pa: PhysAddr = self.clone().into();
unsafe {
core::slice::from_raw_parts_mut(pa.0 as *mut PageTableEntry, 512)
}
let pa: PhysAddr = (*self).into();
unsafe { core::slice::from_raw_parts_mut(pa.0 as *mut PageTableEntry, 512) }
}
pub fn get_bytes_array(&self) -> &'static mut [u8] {
let pa: PhysAddr = self.clone().into();
unsafe {
core::slice::from_raw_parts_mut(pa.0 as *mut u8, 4096)
}
let pa: PhysAddr = (*self).into();
unsafe { core::slice::from_raw_parts_mut(pa.0 as *mut u8, 4096) }
}
pub fn get_mut<T>(&self) -> &'static mut T {
let pa: PhysAddr = self.clone().into();
let pa: PhysAddr = (*self).into();
pa.get_mut()
}
}
@ -165,41 +193,57 @@ impl StepByOne for PhysPageNum {
}
#[derive(Copy, Clone)]
pub struct SimpleRange<T> where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug, {
pub struct SimpleRange<T>
where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug,
{
l: T,
r: T,
}
impl<T> SimpleRange<T> where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug, {
impl<T> SimpleRange<T>
where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug,
{
pub fn new(start: T, end: T) -> Self {
assert!(start <= end, "start {:?} > end {:?}!", start, end);
Self { l: start, r: end }
}
pub fn get_start(&self) -> T { self.l }
pub fn get_end(&self) -> T { self.r }
pub fn get_start(&self) -> T {
self.l
}
pub fn get_end(&self) -> T {
self.r
}
}
impl<T> IntoIterator for SimpleRange<T> where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug, {
impl<T> IntoIterator for SimpleRange<T>
where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug,
{
type Item = T;
type IntoIter = SimpleRangeIterator<T>;
fn into_iter(self) -> Self::IntoIter {
SimpleRangeIterator::new(self.l, self.r)
}
}
pub struct SimpleRangeIterator<T> where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug, {
pub struct SimpleRangeIterator<T>
where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug,
{
current: T,
end: T,
}
impl<T> SimpleRangeIterator<T> where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug, {
impl<T> SimpleRangeIterator<T>
where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug,
{
pub fn new(l: T, r: T) -> Self {
Self { current: l, end: r, }
Self { current: l, end: r }
}
}
impl<T> Iterator for SimpleRangeIterator<T> where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug, {
impl<T> Iterator for SimpleRangeIterator<T>
where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug,
{
type Item = T;
fn next(&mut self) -> Option<Self::Item> {
if self.current == self.end {


@ -1,9 +1,9 @@
use super::{PhysAddr, PhysPageNum};
use alloc::vec::Vec;
use crate::sync::UPSafeCell;
use crate::config::MEMORY_END;
use lazy_static::*;
use crate::sync::UPSafeCell;
use alloc::vec::Vec;
use core::fmt::{self, Debug, Formatter};
use lazy_static::*;
pub struct FrameTracker {
pub ppn: PhysPageNum,
@ -62,22 +62,17 @@ impl FrameAllocator for StackFrameAllocator {
fn alloc(&mut self) -> Option<PhysPageNum> {
if let Some(ppn) = self.recycled.pop() {
Some(ppn.into())
} else if self.current == self.end {
None
} else {
if self.current == self.end {
None
} else {
self.current += 1;
Some((self.current - 1).into())
}
self.current += 1;
Some((self.current - 1).into())
}
}
fn dealloc(&mut self, ppn: PhysPageNum) {
let ppn = ppn.0;
// validity check
if ppn >= self.current || self.recycled
.iter()
.find(|&v| {*v == ppn})
.is_some() {
if ppn >= self.current || self.recycled.iter().any(|&v| v == ppn) {
panic!("Frame ppn={:#x} has not been allocated!", ppn);
}
// recycle
@ -88,31 +83,29 @@ impl FrameAllocator for StackFrameAllocator {
type FrameAllocatorImpl = StackFrameAllocator;
lazy_static! {
pub static ref FRAME_ALLOCATOR: UPSafeCell<FrameAllocatorImpl> = unsafe {
UPSafeCell::new(FrameAllocatorImpl::new())
};
pub static ref FRAME_ALLOCATOR: UPSafeCell<FrameAllocatorImpl> =
unsafe { UPSafeCell::new(FrameAllocatorImpl::new()) };
}
pub fn init_frame_allocator() {
extern "C" {
fn ekernel();
}
FRAME_ALLOCATOR
.exclusive_access()
.init(PhysAddr::from(ekernel as usize).ceil(), PhysAddr::from(MEMORY_END).floor());
FRAME_ALLOCATOR.exclusive_access().init(
PhysAddr::from(ekernel as usize).ceil(),
PhysAddr::from(MEMORY_END).floor(),
);
}
pub fn frame_alloc() -> Option<FrameTracker> {
FRAME_ALLOCATOR
.exclusive_access()
.alloc()
.map(|ppn| FrameTracker::new(ppn))
.map(FrameTracker::new)
}
pub fn frame_dealloc(ppn: PhysPageNum) {
FRAME_ALLOCATOR
.exclusive_access()
.dealloc(ppn);
FRAME_ALLOCATOR.exclusive_access().dealloc(ppn);
}
#[allow(unused)]
@ -131,4 +124,4 @@ pub fn frame_allocator_test() {
}
drop(v);
println!("frame_allocator_test passed!");
}
}


@ -1,5 +1,5 @@
use buddy_system_allocator::LockedHeap;
use crate::config::KERNEL_HEAP_SIZE;
use buddy_system_allocator::LockedHeap;
#[global_allocator]
static HEAP_ALLOCATOR: LockedHeap = LockedHeap::empty();
@ -36,8 +36,8 @@ pub fn heap_test() {
for i in 0..500 {
v.push(i);
}
for i in 0..500 {
assert_eq!(v[i], i);
for (i, val) in v.iter().take(500).enumerate() {
assert_eq!(*val, i);
}
assert!(bss_range.contains(&(v.as_ptr() as usize)));
drop(v);


@ -1,20 +1,15 @@
use super::{PageTable, PageTableEntry, PTEFlags};
use super::{VirtPageNum, VirtAddr, PhysPageNum, PhysAddr};
use super::{FrameTracker, frame_alloc};
use super::{VPNRange, StepByOne};
use alloc::collections::BTreeMap;
use alloc::vec::Vec;
use riscv::register::satp;
use alloc::sync::Arc;
use lazy_static::*;
use super::{frame_alloc, FrameTracker};
use super::{PTEFlags, PageTable, PageTableEntry};
use super::{PhysAddr, PhysPageNum, VirtAddr, VirtPageNum};
use super::{StepByOne, VPNRange};
use crate::config::{MEMORY_END, MMIO, PAGE_SIZE, TRAMPOLINE};
use crate::sync::UPSafeCell;
use crate::config::{
MEMORY_END,
PAGE_SIZE,
TRAMPOLINE,
MMIO,
};
use alloc::collections::BTreeMap;
use alloc::sync::Arc;
use alloc::vec::Vec;
use core::arch::asm;
use lazy_static::*;
use riscv::register::satp;
extern "C" {
fn stext();
@ -30,9 +25,8 @@ extern "C" {
}
lazy_static! {
pub static ref KERNEL_SPACE: Arc<UPSafeCell<MemorySet>> = Arc::new(unsafe {
UPSafeCell::new(MemorySet::new_kernel())
});
pub static ref KERNEL_SPACE: Arc<UPSafeCell<MemorySet>> =
Arc::new(unsafe { UPSafeCell::new(MemorySet::new_kernel()) });
}
pub fn kernel_token() -> usize {
@ -55,17 +49,24 @@ impl MemorySet {
self.page_table.token()
}
/// Assume that no conflicts.
pub fn insert_framed_area(&mut self, start_va: VirtAddr, end_va: VirtAddr, permission: MapPermission) {
self.push(MapArea::new(
start_va,
end_va,
MapType::Framed,
permission,
), None);
pub fn insert_framed_area(
&mut self,
start_va: VirtAddr,
end_va: VirtAddr,
permission: MapPermission,
) {
self.push(
MapArea::new(start_va, end_va, MapType::Framed, permission),
None,
);
}
pub fn remove_area_with_start_vpn(&mut self, start_vpn: VirtPageNum) {
if let Some((idx, area)) = self.areas.iter_mut().enumerate()
.find(|(_, area)| area.vpn_range.get_start() == start_vpn) {
if let Some((idx, area)) = self
.areas
.iter_mut()
.enumerate()
.find(|(_, area)| area.vpn_range.get_start() == start_vpn)
{
area.unmap(&mut self.page_table);
self.areas.remove(idx);
}
@ -94,50 +95,71 @@ impl MemorySet {
println!(".text [{:#x}, {:#x})", stext as usize, etext as usize);
println!(".rodata [{:#x}, {:#x})", srodata as usize, erodata as usize);
println!(".data [{:#x}, {:#x})", sdata as usize, edata as usize);
println!(".bss [{:#x}, {:#x})", sbss_with_stack as usize, ebss as usize);
println!(
".bss [{:#x}, {:#x})",
sbss_with_stack as usize, ebss as usize
);
println!("mapping .text section");
memory_set.push(MapArea::new(
(stext as usize).into(),
(etext as usize).into(),
MapType::Identical,
MapPermission::R | MapPermission::X,
), None);
memory_set.push(
MapArea::new(
(stext as usize).into(),
(etext as usize).into(),
MapType::Identical,
MapPermission::R | MapPermission::X,
),
None,
);
println!("mapping .rodata section");
memory_set.push(MapArea::new(
(srodata as usize).into(),
(erodata as usize).into(),
MapType::Identical,
MapPermission::R,
), None);
memory_set.push(
MapArea::new(
(srodata as usize).into(),
(erodata as usize).into(),
MapType::Identical,
MapPermission::R,
),
None,
);
println!("mapping .data section");
memory_set.push(MapArea::new(
(sdata as usize).into(),
(edata as usize).into(),
MapType::Identical,
MapPermission::R | MapPermission::W,
), None);
println!("mapping .bss section");
memory_set.push(MapArea::new(
(sbss_with_stack as usize).into(),
(ebss as usize).into(),
MapType::Identical,
MapPermission::R | MapPermission::W,
), None);
println!("mapping physical memory");
memory_set.push(MapArea::new(
(ekernel as usize).into(),
MEMORY_END.into(),
MapType::Identical,
MapPermission::R | MapPermission::W,
), None);
println!("mapping memory-mapped registers");
for pair in MMIO {
memory_set.push(MapArea::new(
(*pair).0.into(),
((*pair).0 + (*pair).1).into(),
memory_set.push(
MapArea::new(
(sdata as usize).into(),
(edata as usize).into(),
MapType::Identical,
MapPermission::R | MapPermission::W,
), None);
),
None,
);
println!("mapping .bss section");
memory_set.push(
MapArea::new(
(sbss_with_stack as usize).into(),
(ebss as usize).into(),
MapType::Identical,
MapPermission::R | MapPermission::W,
),
None,
);
println!("mapping physical memory");
memory_set.push(
MapArea::new(
(ekernel as usize).into(),
MEMORY_END.into(),
MapType::Identical,
MapPermission::R | MapPermission::W,
),
None,
);
println!("mapping memory-mapped registers");
for pair in MMIO {
memory_set.push(
MapArea::new(
(*pair).0.into(),
((*pair).0 + (*pair).1).into(),
MapType::Identical,
MapPermission::R | MapPermission::W,
),
None,
);
}
memory_set
}
@ -161,26 +183,31 @@ impl MemorySet {
let end_va: VirtAddr = ((ph.virtual_addr() + ph.mem_size()) as usize).into();
let mut map_perm = MapPermission::U;
let ph_flags = ph.flags();
if ph_flags.is_read() { map_perm |= MapPermission::R; }
if ph_flags.is_write() { map_perm |= MapPermission::W; }
if ph_flags.is_execute() { map_perm |= MapPermission::X; }
let map_area = MapArea::new(
start_va,
end_va,
MapType::Framed,
map_perm,
);
if ph_flags.is_read() {
map_perm |= MapPermission::R;
}
if ph_flags.is_write() {
map_perm |= MapPermission::W;
}
if ph_flags.is_execute() {
map_perm |= MapPermission::X;
}
let map_area = MapArea::new(start_va, end_va, MapType::Framed, map_perm);
max_end_vpn = map_area.vpn_range.get_end();
memory_set.push(
map_area,
Some(&elf.input[ph.offset() as usize..(ph.offset() + ph.file_size()) as usize])
Some(&elf.input[ph.offset() as usize..(ph.offset() + ph.file_size()) as usize]),
);
}
}
let max_end_va: VirtAddr = max_end_vpn.into();
let mut user_stack_base: usize = max_end_va.into();
user_stack_base += PAGE_SIZE;
(memory_set, user_stack_base, elf.header.pt2.entry_point() as usize)
(
memory_set,
user_stack_base,
elf.header.pt2.entry_point() as usize,
)
}
pub fn from_existed_user(user_space: &MemorySet) -> MemorySet {
let mut memory_set = Self::new_bare();
@ -194,7 +221,9 @@ impl MemorySet {
for vpn in area.vpn_range {
let src_ppn = user_space.translate(vpn).unwrap().ppn();
let dst_ppn = memory_set.translate(vpn).unwrap().ppn();
dst_ppn.get_bytes_array().copy_from_slice(src_ppn.get_bytes_array());
dst_ppn
.get_bytes_array()
.copy_from_slice(src_ppn.get_bytes_array());
}
}
memory_set
@ -227,7 +256,7 @@ impl MapArea {
start_va: VirtAddr,
end_va: VirtAddr,
map_type: MapType,
map_perm: MapPermission
map_perm: MapPermission,
) -> Self {
let start_vpn: VirtPageNum = start_va.floor();
let end_vpn: VirtPageNum = end_va.ceil();
@ -262,11 +291,8 @@ impl MapArea {
page_table.map(vpn, ppn, pte_flags);
}
pub fn unmap_one(&mut self, page_table: &mut PageTable, vpn: VirtPageNum) {
match self.map_type {
MapType::Framed => {
self.data_frames.remove(&vpn);
}
_ => {}
if self.map_type == MapType::Framed {
self.data_frames.remove(&vpn);
}
page_table.unmap(vpn);
}
@ -325,17 +351,26 @@ pub fn remap_test() {
let mid_text: VirtAddr = ((stext as usize + etext as usize) / 2).into();
let mid_rodata: VirtAddr = ((srodata as usize + erodata as usize) / 2).into();
let mid_data: VirtAddr = ((sdata as usize + edata as usize) / 2).into();
assert_eq!(
kernel_space.page_table.translate(mid_text.floor()).unwrap().writable(),
false
assert!(
!kernel_space
.page_table
.translate(mid_text.floor())
.unwrap()
.writable(),
);
assert_eq!(
kernel_space.page_table.translate(mid_rodata.floor()).unwrap().writable(),
false,
assert!(
!kernel_space
.page_table
.translate(mid_rodata.floor())
.unwrap()
.writable(),
);
assert_eq!(
kernel_space.page_table.translate(mid_data.floor()).unwrap().executable(),
false,
assert!(
!kernel_space
.page_table
.translate(mid_data.floor())
.unwrap()
.executable(),
);
println!("remap_test passed!");
}


@ -1,28 +1,22 @@
mod heap_allocator;
mod address;
mod frame_allocator;
mod page_table;
mod heap_allocator;
mod memory_set;
mod page_table;
use page_table::PTEFlags;
use address::VPNRange;
pub use address::{PhysAddr, VirtAddr, PhysPageNum, VirtPageNum, StepByOne};
pub use frame_allocator::{FrameTracker, frame_alloc, frame_dealloc,};
pub use page_table::{
PageTable,
PageTableEntry,
translated_byte_buffer,
translated_str,
translated_ref,
translated_refmut,
UserBuffer,
UserBufferIterator,
};
pub use memory_set::{MemorySet, KERNEL_SPACE, MapPermission, kernel_token};
pub use address::{PhysAddr, PhysPageNum, StepByOne, VirtAddr, VirtPageNum};
pub use frame_allocator::{frame_alloc, frame_dealloc, FrameTracker};
pub use memory_set::remap_test;
pub use memory_set::{kernel_token, MapPermission, MemorySet, KERNEL_SPACE};
use page_table::PTEFlags;
pub use page_table::{
translated_byte_buffer, translated_ref, translated_refmut, translated_str, PageTable,
PageTableEntry, UserBuffer, UserBufferIterator,
};
pub fn init() {
heap_allocator::init_heap();
frame_allocator::init_frame_allocator();
KERNEL_SPACE.exclusive_access().activate();
}
}


@ -1,15 +1,7 @@
use super::{
frame_alloc,
PhysPageNum,
FrameTracker,
VirtPageNum,
VirtAddr,
PhysAddr,
StepByOne
};
use alloc::vec::Vec;
use alloc::vec;
use super::{frame_alloc, FrameTracker, PhysAddr, PhysPageNum, StepByOne, VirtAddr, VirtPageNum};
use alloc::string::String;
use alloc::vec;
use alloc::vec::Vec;
use bitflags::*;
bitflags! {
@ -38,9 +30,7 @@ impl PageTableEntry {
}
}
pub fn empty() -> Self {
PageTableEntry {
bits: 0,
}
PageTableEntry { bits: 0 }
}
pub fn ppn(&self) -> PhysPageNum {
(self.bits >> 10 & ((1usize << 44) - 1)).into()
@ -87,8 +77,8 @@ impl PageTable {
let idxs = vpn.indexes();
let mut ppn = self.root_ppn;
let mut result: Option<&mut PageTableEntry> = None;
for i in 0..3 {
let pte = &mut ppn.get_pte_array()[idxs[i]];
for (i, idx) in idxs.iter().enumerate() {
let pte = &mut ppn.get_pte_array()[*idx];
if i == 2 {
result = Some(pte);
break;
@ -106,8 +96,8 @@ impl PageTable {
let idxs = vpn.indexes();
let mut ppn = self.root_ppn;
let mut result: Option<&mut PageTableEntry> = None;
for i in 0..3 {
let pte = &mut ppn.get_pte_array()[idxs[i]];
for (i, idx) in idxs.iter().enumerate() {
let pte = &mut ppn.get_pte_array()[*idx];
if i == 2 {
result = Some(pte);
break;
@ -132,17 +122,15 @@ impl PageTable {
*pte = PageTableEntry::empty();
}
pub fn translate(&self, vpn: VirtPageNum) -> Option<PageTableEntry> {
self.find_pte(vpn)
.map(|pte| {pte.clone()})
self.find_pte(vpn).map(|pte| *pte)
}
pub fn translate_va(&self, va: VirtAddr) -> Option<PhysAddr> {
self.find_pte(va.clone().floor())
.map(|pte| {
let aligned_pa: PhysAddr = pte.ppn().into();
let offset = va.page_offset();
let aligned_pa_usize: usize = aligned_pa.into();
(aligned_pa_usize + offset).into()
})
self.find_pte(va.clone().floor()).map(|pte| {
let aligned_pa: PhysAddr = pte.ppn().into();
let offset = va.page_offset();
let aligned_pa_usize: usize = aligned_pa.into();
(aligned_pa_usize + offset).into()
})
}
pub fn token(&self) -> usize {
8usize << 60 | self.root_ppn.0
@ -157,10 +145,7 @@ pub fn translated_byte_buffer(token: usize, ptr: *const u8, len: usize) -> Vec<&
while start < end {
let start_va = VirtAddr::from(start);
let mut vpn = start_va.floor();
let ppn = page_table
.translate(vpn)
.unwrap()
.ppn();
let ppn = page_table.translate(vpn).unwrap().ppn();
vpn.step();
let mut end_va: VirtAddr = vpn.into();
end_va = end_va.min(VirtAddr::from(end));
@ -180,7 +165,10 @@ pub fn translated_str(token: usize, ptr: *const u8) -> String {
let mut string = String::new();
let mut va = ptr as usize;
loop {
let ch: u8 = *(page_table.translate_va(VirtAddr::from(va)).unwrap().get_mut());
let ch: u8 = *(page_table
.translate_va(VirtAddr::from(va))
.unwrap()
.get_mut());
if ch == 0 {
break;
}
@ -192,13 +180,19 @@ pub fn translated_str(token: usize, ptr: *const u8) -> String {
pub fn translated_ref<T>(token: usize, ptr: *const T) -> &'static T {
let page_table = PageTable::from_token(token);
page_table.translate_va(VirtAddr::from(ptr as usize)).unwrap().get_ref()
page_table
.translate_va(VirtAddr::from(ptr as usize))
.unwrap()
.get_ref()
}
pub fn translated_refmut<T>(token: usize, ptr: *mut T) -> &'static mut T {
let page_table = PageTable::from_token(token);
let va = ptr as usize;
page_table.translate_va(VirtAddr::from(va)).unwrap().get_mut()
page_table
.translate_va(VirtAddr::from(va))
.unwrap()
.get_mut()
}
pub struct UserBuffer {


@ -1,5 +1,7 @@
#![allow(unused)]
use core::arch::asm;
const SBI_SET_TIMER: usize = 0;
const SBI_CONSOLE_PUTCHAR: usize = 1;
const SBI_CONSOLE_GETCHAR: usize = 2;
@ -41,4 +43,3 @@ pub fn shutdown() -> ! {
sbi_call(SBI_SHUTDOWN, 0, 0, 0);
panic!("It should shutdown!");
}

os/src/sync/condvar.rs (new file, 39 lines added)

@ -0,0 +1,39 @@
use crate::sync::{Mutex, UPSafeCell};
use crate::task::{add_task, block_current_and_run_next, current_task, TaskControlBlock};
use alloc::{collections::VecDeque, sync::Arc};
pub struct Condvar {
pub inner: UPSafeCell<CondvarInner>,
}
pub struct CondvarInner {
pub wait_queue: VecDeque<Arc<TaskControlBlock>>,
}
impl Condvar {
pub fn new() -> Self {
Self {
inner: unsafe {
UPSafeCell::new(CondvarInner {
wait_queue: VecDeque::new(),
})
},
}
}
pub fn signal(&self) {
let mut inner = self.inner.exclusive_access();
if let Some(task) = inner.wait_queue.pop_front() {
add_task(task);
}
}
pub fn wait(&self, mutex: Arc<dyn Mutex>) {
mutex.unlock();
let mut inner = self.inner.exclusive_access();
inner.wait_queue.push_back(current_task().unwrap());
drop(inner);
block_current_and_run_next();
mutex.lock();
}
}
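For context on how the new Condvar above is meant to be used, here is a minimal user-space sketch pairing the condvar syscalls (IDs 1030-1032, added later in this commit) with a blocking mutex. This program is not part of the commit, and the user-library wrapper names (mutex_blocking_create, mutex_lock, mutex_unlock, condvar_create, condvar_signal, condvar_wait, thread_create, waittid, exit) are assumptions about thin wrappers that forward to those syscalls.

// Hypothetical user program, not part of this commit.
#![no_std]
#![no_main]

extern crate user_lib;
use user_lib::{
    condvar_create, condvar_signal, condvar_wait, exit, mutex_blocking_create, mutex_lock,
    mutex_unlock, thread_create, waittid,
};

static mut READY: bool = false;
static mut MUTEX: usize = 0;
static mut CONDVAR: usize = 0;

// Waiting thread: sleeps on the condvar until the main thread sets READY.
// As in Condvar::wait above, the mutex is released while the thread is blocked
// and re-acquired on wakeup, so rechecking the flag is race-free.
pub fn waiter() -> ! {
    unsafe {
        mutex_lock(MUTEX);
        while !READY {
            condvar_wait(CONDVAR, MUTEX);
        }
        mutex_unlock(MUTEX);
    }
    exit(0)
}

#[no_mangle]
pub fn main() -> i32 {
    unsafe {
        MUTEX = mutex_blocking_create() as usize;
        CONDVAR = condvar_create() as usize;
    }
    let tid = thread_create(waiter as usize, 0) as usize;
    unsafe {
        mutex_lock(MUTEX);
        READY = true;
        // Wake one waiter; Condvar::signal moves it back onto the ready queue.
        condvar_signal(CONDVAR);
        mutex_unlock(MUTEX);
    }
    waittid(tid);
    0
}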


@ -1,7 +1,9 @@
mod up;
mod condvar;
mod mutex;
mod semaphore;
mod up;
pub use up::UPSafeCell;
pub use mutex::{Mutex, MutexSpin, MutexBlocking};
pub use condvar::Condvar;
pub use mutex::{Mutex, MutexBlocking, MutexSpin};
pub use semaphore::Semaphore;
pub use up::UPSafeCell;


@ -1,8 +1,8 @@
use super::UPSafeCell;
use crate::task::{block_current_and_run_next, suspend_current_and_run_next};
use crate::task::TaskControlBlock;
use crate::task::{add_task, current_task};
use alloc::{sync::Arc, collections::VecDeque};
use crate::task::{block_current_and_run_next, suspend_current_and_run_next};
use alloc::{collections::VecDeque, sync::Arc};
pub trait Mutex: Sync + Send {
fn lock(&self);
@ -77,11 +77,12 @@ impl Mutex for MutexBlocking {
}
fn unlock(&self) {
let mut mutex_inner = self.inner.exclusive_access();
assert_eq!(mutex_inner.locked, true);
mutex_inner.locked = false;
let mut mutex_inner = self.inner.exclusive_access();
assert!(mutex_inner.locked);
if let Some(waking_task) = mutex_inner.wait_queue.pop_front() {
add_task(waking_task);
} else {
mutex_inner.locked = false;
}
}
}


@ -1,6 +1,6 @@
use alloc::{sync::Arc, collections::VecDeque};
use crate::task::{add_task, TaskControlBlock, current_task, block_current_and_run_next};
use crate::sync::UPSafeCell;
use crate::task::{add_task, block_current_and_run_next, current_task, TaskControlBlock};
use alloc::{collections::VecDeque, sync::Arc};
pub struct Semaphore {
pub inner: UPSafeCell<SemaphoreInner>,
@ -14,12 +14,12 @@ pub struct SemaphoreInner {
impl Semaphore {
pub fn new(res_count: usize) -> Self {
Self {
inner: unsafe { UPSafeCell::new(
SemaphoreInner {
inner: unsafe {
UPSafeCell::new(SemaphoreInner {
count: res_count as isize,
wait_queue: VecDeque::new(),
}
)},
})
},
}
}


@ -18,10 +18,12 @@ impl<T> UPSafeCell<T> {
/// User is responsible to guarantee that inner struct is only used in
/// uniprocessor.
pub unsafe fn new(value: T) -> Self {
Self { inner: RefCell::new(value) }
Self {
inner: RefCell::new(value),
}
}
/// Panic if the data has been borrowed.
pub fn exclusive_access(&self) -> RefMut<'_, T> {
self.inner.borrow_mut()
}
}
}


@ -1,11 +1,6 @@
use crate::mm::{
UserBuffer,
translated_byte_buffer,
translated_refmut,
translated_str,
};
use crate::task::{current_user_token, current_process};
use crate::fs::{make_pipe, OpenFlags, open_file};
use crate::fs::{make_pipe, open_file, OpenFlags};
use crate::mm::{translated_byte_buffer, translated_refmut, translated_str, UserBuffer};
use crate::task::{current_process, current_user_token};
use alloc::sync::Arc;
pub fn sys_write(fd: usize, buf: *const u8, len: usize) -> isize {
@ -22,9 +17,7 @@ pub fn sys_write(fd: usize, buf: *const u8, len: usize) -> isize {
let file = file.clone();
// release current task TCB manually to avoid multi-borrow
drop(inner);
file.write(
UserBuffer::new(translated_byte_buffer(token, buf, len))
) as isize
file.write(UserBuffer::new(translated_byte_buffer(token, buf, len))) as isize
} else {
-1
}
@ -44,9 +37,7 @@ pub fn sys_read(fd: usize, buf: *const u8, len: usize) -> isize {
}
// release current task TCB manually to avoid multi-borrow
drop(inner);
file.read(
UserBuffer::new(translated_byte_buffer(token, buf, len))
) as isize
file.read(UserBuffer::new(translated_byte_buffer(token, buf, len))) as isize
} else {
-1
}
@ -56,10 +47,7 @@ pub fn sys_open(path: *const u8, flags: u32) -> isize {
let process = current_process();
let token = current_user_token();
let path = translated_str(token, path);
if let Some(inode) = open_file(
path.as_str(),
OpenFlags::from_bits(flags).unwrap()
) {
if let Some(inode) = open_file(path.as_str(), OpenFlags::from_bits(flags).unwrap()) {
let mut inner = process.inner_exclusive_access();
let fd = inner.alloc_fd();
inner.fd_table[fd] = Some(inode);


@ -7,6 +7,7 @@ const SYSCALL_WRITE: usize = 64;
const SYSCALL_EXIT: usize = 93;
const SYSCALL_SLEEP: usize = 101;
const SYSCALL_YIELD: usize = 124;
const SYSCALL_KILL: usize = 129;
const SYSCALL_GET_TIME: usize = 169;
const SYSCALL_GETPID: usize = 172;
const SYSCALL_FORK: usize = 220;
@ -21,20 +22,23 @@ const SYSCALL_MUTEX_UNLOCK: usize = 1012;
const SYSCALL_SEMAPHORE_CREATE: usize = 1020;
const SYSCALL_SEMAPHORE_UP: usize = 1021;
const SYSCALL_SEMAPHORE_DOWN: usize = 1022;
const SYSCALL_CONDVAR_CREATE: usize = 1030;
const SYSCALL_CONDVAR_SIGNAL: usize = 1031;
const SYSCALL_CONDVAR_WAIT: usize = 1032;
mod fs;
mod process;
mod thread;
mod sync;
mod thread;
use fs::*;
use process::*;
use thread::*;
use sync::*;
use thread::*;
pub fn syscall(syscall_id: usize, args: [usize; 3]) -> isize {
match syscall_id {
SYSCALL_DUP=> sys_dup(args[0]),
SYSCALL_DUP => sys_dup(args[0]),
SYSCALL_OPEN => sys_open(args[0] as *const u8, args[1] as u32),
SYSCALL_CLOSE => sys_close(args[0]),
SYSCALL_PIPE => sys_pipe(args[0] as *mut usize),
@ -43,6 +47,7 @@ pub fn syscall(syscall_id: usize, args: [usize; 3]) -> isize {
SYSCALL_EXIT => sys_exit(args[0] as i32),
SYSCALL_SLEEP => sys_sleep(args[0]),
SYSCALL_YIELD => sys_yield(),
SYSCALL_KILL => sys_kill(args[0], args[1] as u32),
SYSCALL_GET_TIME => sys_get_time(),
SYSCALL_GETPID => sys_getpid(),
SYSCALL_FORK => sys_fork(),
@ -54,10 +59,12 @@ pub fn syscall(syscall_id: usize, args: [usize; 3]) -> isize {
SYSCALL_MUTEX_CREATE => sys_mutex_create(args[0] == 1),
SYSCALL_MUTEX_LOCK => sys_mutex_lock(args[0]),
SYSCALL_MUTEX_UNLOCK => sys_mutex_unlock(args[0]),
SYSCALL_SEMAPHORE_CREATE => sys_semaphore_creare(args[0]),
SYSCALL_SEMAPHORE_CREATE => sys_semaphore_create(args[0]),
SYSCALL_SEMAPHORE_UP => sys_semaphore_up(args[0]),
SYSCALL_SEMAPHORE_DOWN => sys_semaphore_down(args[0]),
SYSCALL_CONDVAR_CREATE => sys_condvar_create(args[0]),
SYSCALL_CONDVAR_SIGNAL => sys_condvar_signal(args[0]),
SYSCALL_CONDVAR_WAIT => sys_condvar_wait(args[0], args[1]),
_ => panic!("Unsupported syscall_id: {}", syscall_id),
}
}


@ -1,23 +1,13 @@
use crate::fs::{open_file, OpenFlags};
use crate::mm::{translated_ref, translated_refmut, translated_str};
use crate::task::{
suspend_current_and_run_next,
exit_current_and_run_next,
current_task,
current_process,
current_user_token,
current_process, current_task, current_user_token, exit_current_and_run_next, pid2process,
suspend_current_and_run_next, SignalFlags,
};
use crate::timer::get_time_ms;
use crate::mm::{
translated_str,
translated_refmut,
translated_ref,
};
use crate::fs::{
open_file,
OpenFlags,
};
use alloc::string::String;
use alloc::sync::Arc;
use alloc::vec::Vec;
use alloc::string::String;
pub fn sys_exit(exit_code: i32) -> ! {
exit_current_and_run_next(exit_code);
@ -61,7 +51,9 @@ pub fn sys_exec(path: *const u8, mut args: *const usize) -> isize {
break;
}
args_vec.push(translated_str(token, arg_str_ptr as *const u8));
unsafe { args = args.add(1); }
unsafe {
args = args.add(1);
}
}
if let Some(app_inode) = open_file(path.as_str(), OpenFlags::RDONLY) {
let all_data = app_inode.read_all();
@ -82,21 +74,19 @@ pub fn sys_waitpid(pid: isize, exit_code_ptr: *mut i32) -> isize {
// find a child process
let mut inner = process.inner_exclusive_access();
if inner.children
if !inner
.children
.iter()
.find(|p| {pid == -1 || pid as usize == p.getpid()})
.is_none() {
.any(|p| pid == -1 || pid as usize == p.getpid())
{
return -1;
// ---- release current PCB
}
let pair = inner.children
.iter()
.enumerate()
.find(|(_, p)| {
// ++++ temporarily access child PCB exclusively
p.inner_exclusive_access().is_zombie && (pid == -1 || pid as usize == p.getpid())
// ++++ release child PCB
});
let pair = inner.children.iter().enumerate().find(|(_, p)| {
// ++++ temporarily access child PCB exclusively
p.inner_exclusive_access().is_zombie && (pid == -1 || pid as usize == p.getpid())
// ++++ release child PCB
});
if let Some((idx, _)) = pair {
let child = inner.children.remove(idx);
// confirm that child will be deallocated after being removed from children list
@ -112,3 +102,16 @@ pub fn sys_waitpid(pid: isize, exit_code_ptr: *mut i32) -> isize {
}
// ---- release current PCB automatically
}
pub fn sys_kill(pid: usize, signal: u32) -> isize {
if let Some(process) = pid2process(pid) {
if let Some(flag) = SignalFlags::from_bits(signal) {
process.inner_exclusive_access().signals |= flag;
0
} else {
-1
}
} else {
-1
}
}
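For reference, a hedged sketch of the user-side stub that would reach the sys_kill handler above through the new SYSCALL_KILL id (129). The register convention (arguments in a0-a2, syscall id in a7, return value in a0) is the usual RISC-V convention this kernel's dispatcher assumes; the wrapper itself is illustrative and not code from this commit.

// Illustrative user-space wrapper, not part of this commit.
use core::arch::asm;

const SYSCALL_KILL: usize = 129;

// Generic RISC-V syscall stub: arguments in a0-a2, syscall id in a7, result in a0.
fn syscall(id: usize, args: [usize; 3]) -> isize {
    let mut ret: isize;
    unsafe {
        asm!(
            "ecall",
            inlateout("x10") args[0] => ret,
            in("x11") args[1],
            in("x12") args[2],
            in("x17") id,
        );
    }
    ret
}

// Send `signal` (one SignalFlags bit, matching sys_kill above) to process `pid`;
// returns 0 on success and -1 if the pid or signal value is invalid.
pub fn kill(pid: usize, signal: u32) -> isize {
    syscall(SYSCALL_KILL, [pid, signal as usize, 0])
}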


@ -1,6 +1,6 @@
use crate::task::{current_task, current_process, block_current_and_run_next};
use crate::sync::{MutexSpin, MutexBlocking, Semaphore};
use crate::timer::{get_time_ms, add_timer};
use crate::sync::{Condvar, Mutex, MutexBlocking, MutexSpin, Semaphore};
use crate::task::{block_current_and_run_next, current_process, current_task};
use crate::timer::{add_timer, get_time_ms};
use alloc::sync::Arc;
pub fn sys_sleep(ms: usize) -> isize {
@ -13,21 +13,23 @@ pub fn sys_sleep(ms: usize) -> isize {
pub fn sys_mutex_create(blocking: bool) -> isize {
let process = current_process();
let mutex: Option<Arc<dyn Mutex>> = if !blocking {
Some(Arc::new(MutexSpin::new()))
} else {
Some(Arc::new(MutexBlocking::new()))
};
let mut process_inner = process.inner_exclusive_access();
if let Some(id) = process_inner
.mutex_list
.iter()
.enumerate()
.find(|(_, item)| item.is_none())
.map(|(id, _)| id) {
process_inner.mutex_list[id] = if !blocking {
Some(Arc::new(MutexSpin::new()))
} else {
Some(Arc::new(MutexBlocking::new()))
};
.map(|(id, _)| id)
{
process_inner.mutex_list[id] = mutex;
id as isize
} else {
process_inner.mutex_list.push(Some(Arc::new(MutexSpin::new())));
process_inner.mutex_list.push(mutex);
process_inner.mutex_list.len() as isize - 1
}
}
@ -52,7 +54,7 @@ pub fn sys_mutex_unlock(mutex_id: usize) -> isize {
0
}
pub fn sys_semaphore_creare(res_count: usize) -> isize {
pub fn sys_semaphore_create(res_count: usize) -> isize {
let process = current_process();
let mut process_inner = process.inner_exclusive_access();
let id = if let Some(id) = process_inner
@ -60,11 +62,14 @@ pub fn sys_semaphore_creare(res_count: usize) -> isize {
.iter()
.enumerate()
.find(|(_, item)| item.is_none())
.map(|(id, _)| id) {
process_inner.semaphore_list[id] = Some(Arc::new(Semaphore::new(res_count)));
.map(|(id, _)| id)
{
process_inner.semaphore_list[id] = Some(Arc::new(Semaphore::new(res_count)));
id
} else {
process_inner.semaphore_list.push(Some(Arc::new(Semaphore::new(res_count))));
process_inner
.semaphore_list
.push(Some(Arc::new(Semaphore::new(res_count))));
process_inner.semaphore_list.len() - 1
};
id as isize
@ -87,3 +92,43 @@ pub fn sys_semaphore_down(sem_id: usize) -> isize {
sem.down();
0
}
pub fn sys_condvar_create(_arg: usize) -> isize {
let process = current_process();
let mut process_inner = process.inner_exclusive_access();
let id = if let Some(id) = process_inner
.condvar_list
.iter()
.enumerate()
.find(|(_, item)| item.is_none())
.map(|(id, _)| id)
{
process_inner.condvar_list[id] = Some(Arc::new(Condvar::new()));
id
} else {
process_inner
.condvar_list
.push(Some(Arc::new(Condvar::new())));
process_inner.condvar_list.len() - 1
};
id as isize
}
pub fn sys_condvar_signal(condvar_id: usize) -> isize {
let process = current_process();
let process_inner = process.inner_exclusive_access();
let condvar = Arc::clone(process_inner.condvar_list[condvar_id].as_ref().unwrap());
drop(process_inner);
condvar.signal();
0
}
pub fn sys_condvar_wait(condvar_id: usize, mutex_id: usize) -> isize {
let process = current_process();
let process_inner = process.inner_exclusive_access();
let condvar = Arc::clone(process_inner.condvar_list[condvar_id].as_ref().unwrap());
let mutex = Arc::clone(process_inner.mutex_list[mutex_id].as_ref().unwrap());
drop(process_inner);
condvar.wait(mutex);
0
}


@ -1,5 +1,9 @@
use crate::{
mm::kernel_token,
task::{add_task, current_task, TaskControlBlock},
trap::{trap_handler, TrapContext},
};
use alloc::sync::Arc;
use crate::{mm::kernel_token, task::{TaskControlBlock, add_task, current_task}, trap::{TrapContext, trap_handler}};
pub fn sys_thread_create(entry: usize, arg: usize) -> isize {
let task = current_task().unwrap();
@ -7,7 +11,11 @@ pub fn sys_thread_create(entry: usize, arg: usize) -> isize {
// create a new thread
let new_task = Arc::new(TaskControlBlock::new(
Arc::clone(&process),
task.inner_exclusive_access().res.as_ref().unwrap().ustack_base,
task.inner_exclusive_access()
.res
.as_ref()
.unwrap()
.ustack_base,
true,
));
// add new task to scheduler
@ -35,7 +43,13 @@ pub fn sys_thread_create(entry: usize, arg: usize) -> isize {
}
pub fn sys_gettid() -> isize {
current_task().unwrap().inner_exclusive_access().res.as_ref().unwrap().tid as isize
current_task()
.unwrap()
.inner_exclusive_access()
.res
.as_ref()
.unwrap()
.tid as isize
}
/// thread does not exist, return -1


@ -23,4 +23,3 @@ impl TaskContext {
}
}
}


@ -1,9 +1,12 @@
use alloc::{vec::Vec, sync::{Arc, Weak}};
use lazy_static::*;
use crate::sync::UPSafeCell;
use crate::mm::{KERNEL_SPACE, MapPermission, PhysPageNum, VirtAddr};
use crate::config::{KERNEL_STACK_SIZE, PAGE_SIZE, TRAMPOLINE, TRAP_CONTEXT_BASE, USER_STACK_SIZE};
use super::ProcessControlBlock;
use crate::config::{KERNEL_STACK_SIZE, PAGE_SIZE, TRAMPOLINE, TRAP_CONTEXT_BASE, USER_STACK_SIZE};
use crate::mm::{MapPermission, PhysPageNum, VirtAddr, KERNEL_SPACE};
use crate::sync::UPSafeCell;
use alloc::{
sync::{Arc, Weak},
vec::Vec,
};
use lazy_static::*;
pub struct RecycleAllocator {
current: usize,
@ -28,21 +31,19 @@ impl RecycleAllocator {
pub fn dealloc(&mut self, id: usize) {
assert!(id < self.current);
assert!(
self.recycled.iter().find(|i| **i == id).is_none(),
"id {} has been deallocated!", id
!self.recycled.iter().any(|i| *i == id),
"id {} has been deallocated!",
id
);
self.recycled.push(id);
}
}
lazy_static! {
static ref PID_ALLOCATOR: UPSafeCell<RecycleAllocator> = unsafe {
UPSafeCell::new(RecycleAllocator::new())
};
static ref KSTACK_ALLOCATOR: UPSafeCell<RecycleAllocator> = unsafe {
UPSafeCell::new(RecycleAllocator::new())
};
static ref PID_ALLOCATOR: UPSafeCell<RecycleAllocator> =
unsafe { UPSafeCell::new(RecycleAllocator::new()) };
static ref KSTACK_ALLOCATOR: UPSafeCell<RecycleAllocator> =
unsafe { UPSafeCell::new(RecycleAllocator::new()) };
}
pub struct PidHandle(pub usize);
@@ -69,13 +70,11 @@ pub struct KernelStack(pub usize);
pub fn kstack_alloc() -> KernelStack {
let kstack_id = KSTACK_ALLOCATOR.exclusive_access().alloc();
let (kstack_bottom, kstack_top) = kernel_stack_position(kstack_id);
KERNEL_SPACE
.exclusive_access()
.insert_framed_area(
kstack_bottom.into(),
kstack_top.into(),
MapPermission::R | MapPermission::W,
);
KERNEL_SPACE.exclusive_access().insert_framed_area(
kstack_bottom.into(),
kstack_top.into(),
MapPermission::R | MapPermission::W,
);
KernelStack(kstack_id)
}
@@ -91,11 +90,15 @@ impl Drop for KernelStack {
impl KernelStack {
#[allow(unused)]
pub fn push_on_top<T>(&self, value: T) -> *mut T where
T: Sized, {
pub fn push_on_top<T>(&self, value: T) -> *mut T
where
T: Sized,
{
let kernel_stack_top = self.get_top();
let ptr_mut = (kernel_stack_top - core::mem::size_of::<T>()) as *mut T;
unsafe { *ptr_mut = value; }
unsafe {
*ptr_mut = value;
}
ptr_mut
}
pub fn get_top(&self) -> usize {
@@ -142,23 +145,19 @@ impl TaskUserRes {
// alloc user stack
let ustack_bottom = ustack_bottom_from_tid(self.ustack_base, self.tid);
let ustack_top = ustack_bottom + USER_STACK_SIZE;
process_inner
.memory_set
.insert_framed_area(
ustack_bottom.into(),
ustack_top.into(),
MapPermission::R | MapPermission::W | MapPermission::U,
);
process_inner.memory_set.insert_framed_area(
ustack_bottom.into(),
ustack_top.into(),
MapPermission::R | MapPermission::W | MapPermission::U,
);
// alloc trap_cx
let trap_cx_bottom = trap_cx_bottom_from_tid(self.tid);
let trap_cx_top = trap_cx_bottom + PAGE_SIZE;
process_inner
.memory_set
.insert_framed_area(
trap_cx_bottom.into(),
trap_cx_top.into(),
MapPermission::R | MapPermission::W,
);
process_inner.memory_set.insert_framed_area(
trap_cx_bottom.into(),
trap_cx_top.into(),
MapPermission::R | MapPermission::W,
);
}
fn dealloc_user_res(&self) {
@@ -167,10 +166,14 @@ impl TaskUserRes {
let mut process_inner = process.inner_exclusive_access();
// dealloc ustack manually
let ustack_bottom_va: VirtAddr = ustack_bottom_from_tid(self.ustack_base, self.tid).into();
process_inner.memory_set.remove_area_with_start_vpn(ustack_bottom_va.into());
process_inner
.memory_set
.remove_area_with_start_vpn(ustack_bottom_va.into());
// dealloc trap_cx manually
let trap_cx_bottom_va: VirtAddr = trap_cx_bottom_from_tid(self.tid).into();
process_inner.memory_set.remove_area_with_start_vpn(trap_cx_bottom_va.into());
process_inner
.memory_set
.remove_area_with_start_vpn(trap_cx_bottom_va.into());
}
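alloc_user_res and dealloc_user_res map and unmap the two per-thread regions of the process address space: the thread's user stack and its one-page trap context. The address helpers ustack_bottom_from_tid and trap_cx_bottom_from_tid are not part of this hunk; a sketch of the presumed layout, with successive tids getting a stack slot (plus a guard page) above ustack_base and a trap-context page below TRAP_CONTEXT_BASE:

// Assumed definitions, not taken from this commit.
fn ustack_bottom_from_tid(ustack_base: usize, tid: usize) -> usize {
    ustack_base + tid * (PAGE_SIZE + USER_STACK_SIZE)
}

fn trap_cx_bottom_from_tid(tid: usize) -> usize {
    TRAP_CONTEXT_BASE - tid * PAGE_SIZE
}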
#[allow(unused)]
@@ -197,12 +200,18 @@ impl TaskUserRes {
let process = self.process.upgrade().unwrap();
let process_inner = process.inner_exclusive_access();
let trap_cx_bottom_va: VirtAddr = trap_cx_bottom_from_tid(self.tid).into();
process_inner.memory_set.translate(trap_cx_bottom_va.into()).unwrap().ppn()
process_inner
.memory_set
.translate(trap_cx_bottom_va.into())
.unwrap()
.ppn()
}
pub fn ustack_base(&self) -> usize { self.ustack_base }
pub fn ustack_base(&self) -> usize {
self.ustack_base
}
pub fn ustack_top(&self) -> usize {
ustack_bottom_from_tid(self.ustack_base, self.tid) + USER_STACK_SIZE
}
}
@@ -212,4 +221,3 @@ impl Drop for TaskUserRes {
self.dealloc_user_res();
}
}

View file

@@ -1,6 +1,6 @@
use super::{ProcessControlBlock, TaskControlBlock};
use crate::sync::UPSafeCell;
use super::TaskControlBlock;
use alloc::collections::VecDeque;
use alloc::collections::{BTreeMap, VecDeque};
use alloc::sync::Arc;
use lazy_static::*;
@@ -11,7 +11,9 @@ pub struct TaskManager {
/// A simple FIFO scheduler.
impl TaskManager {
pub fn new() -> Self {
Self { ready_queue: VecDeque::new(), }
Self {
ready_queue: VecDeque::new(),
}
}
pub fn add(&mut self, task: Arc<TaskControlBlock>) {
self.ready_queue.push_back(task);
@@ -22,9 +24,10 @@ impl TaskManager {
}
lazy_static! {
pub static ref TASK_MANAGER: UPSafeCell<TaskManager> = unsafe {
UPSafeCell::new(TaskManager::new())
};
pub static ref TASK_MANAGER: UPSafeCell<TaskManager> =
unsafe { UPSafeCell::new(TaskManager::new()) };
pub static ref PID2PCB: UPSafeCell<BTreeMap<usize, Arc<ProcessControlBlock>>> =
unsafe { UPSafeCell::new(BTreeMap::new()) };
}
pub fn add_task(task: Arc<TaskControlBlock>) {
@@ -34,3 +37,19 @@ pub fn add_task(task: Arc<TaskControlBlock>) {
pub fn fetch_task() -> Option<Arc<TaskControlBlock>> {
TASK_MANAGER.exclusive_access().fetch()
}
pub fn pid2process(pid: usize) -> Option<Arc<ProcessControlBlock>> {
let map = PID2PCB.exclusive_access();
map.get(&pid).map(Arc::clone)
}
pub fn insert_into_pid2process(pid: usize, process: Arc<ProcessControlBlock>) {
PID2PCB.exclusive_access().insert(pid, process);
}
pub fn remove_from_pid2process(pid: usize) {
let mut map = PID2PCB.exclusive_access();
if map.remove(&pid).is_none() {
panic!("cannot find pid {} in pid2task!", pid);
}
}
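Alongside the FIFO ready queue, the manager now keeps a global PID2PCB map so a process can be looked up by pid; insert_into_pid2process is called when a process is created (in ProcessControlBlock::new and fork below) and remove_from_pid2process when its main thread exits. As one hypothetical use built only from items this commit introduces (it is not part of the commit itself), a kill-style helper could look like:

// Hypothetical helper, for illustration only.
// pid2process and SignalFlags are both re-exported from crate::task.
pub fn send_signal_to_pid(pid: usize, signal: SignalFlags) -> isize {
    if let Some(process) = pid2process(pid) {
        process.inner_exclusive_access().signals |= signal;
        0
    } else {
        -1 // no process with this pid
    }
}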

View file

@@ -1,38 +1,29 @@
mod context;
mod switch;
mod task;
mod manager;
mod processor;
mod id;
mod manager;
mod process;
mod processor;
mod signal;
mod switch;
#[allow(clippy::module_inception)]
mod task;
use crate::fs::{open_file, OpenFlags};
use switch::__switch;
use alloc::sync::Arc;
use manager::fetch_task;
use lazy_static::*;
use manager::fetch_task;
use process::ProcessControlBlock;
use switch::__switch;
pub use context::TaskContext;
pub use id::{kstack_alloc, pid_alloc, KernelStack, PidHandle};
pub use manager::{add_task, pid2process, remove_from_pid2process};
pub use processor::{
run_tasks,
current_task,
current_process,
current_user_token,
current_trap_cx_user_va,
current_trap_cx,
current_kstack_top,
take_current_task,
schedule,
current_kstack_top, current_process, current_task, current_trap_cx, current_trap_cx_user_va,
current_user_token, run_tasks, schedule, take_current_task,
};
pub use signal::SignalFlags;
pub use task::{TaskControlBlock, TaskStatus};
pub use manager::add_task;
pub use id::{
PidHandle,
pid_alloc,
KernelStack,
kstack_alloc,
};
pub fn suspend_current_and_run_next() {
// There must be an application running.
@@ -76,6 +67,7 @@ pub fn exit_current_and_run_next(exit_code: i32) {
// however, if this is the main thread of current process
// the process should terminate at once
if tid == 0 {
remove_from_pid2process(process.getpid());
let mut process_inner = process.inner_exclusive_access();
// mark this process as a zombie process
process_inner.is_zombie = true;
@@ -86,7 +78,7 @@ pub fn exit_current_and_run_next(exit_code: i32) {
// move all child processes under init process
let mut initproc_inner = INITPROC.inner_exclusive_access();
for child in process_inner.children.iter() {
child.inner_exclusive_access().parent = Some(Arc::downgrade(&INITPROC));
initproc_inner.children.push(child.clone());
}
}
@@ -103,6 +95,8 @@ pub fn exit_current_and_run_next(exit_code: i32) {
process_inner.children.clear();
// deallocate other data in user space i.e. program code/data section
process_inner.memory_set.recycle_data_pages();
// drop file descriptors
process_inner.fd_table.clear();
}
drop(process);
// we do not have to save task context
@@ -121,3 +115,15 @@ lazy_static! {
pub fn add_initproc() {
let _initproc = INITPROC.clone();
}
pub fn check_signals_of_current() -> Option<(i32, &'static str)> {
let process = current_process();
let process_inner = process.inner_exclusive_access();
process_inner.signals.check_error()
}
pub fn current_add_signal(signal: SignalFlags) {
let process = current_process();
let mut process_inner = process.inner_exclusive_access();
process_inner.signals |= signal;
}

View file

@@ -1,20 +1,17 @@
use crate::mm::{
MemorySet,
KERNEL_SPACE,
translated_refmut,
};
use crate::trap::{TrapContext, trap_handler};
use crate::sync::{UPSafeCell, Mutex, Semaphore};
use core::cell::RefMut;
use super::id::RecycleAllocator;
use super::manager::insert_into_pid2process;
use super::TaskControlBlock;
use super::{PidHandle, pid_alloc};
use super::add_task;
use alloc::sync::{Weak, Arc};
use super::{add_task, SignalFlags};
use super::{pid_alloc, PidHandle};
use crate::fs::{File, Stdin, Stdout};
use crate::mm::{translated_refmut, MemorySet, KERNEL_SPACE};
use crate::sync::{Condvar, Mutex, Semaphore, UPSafeCell};
use crate::trap::{trap_handler, TrapContext};
use alloc::string::String;
use alloc::sync::{Arc, Weak};
use alloc::vec;
use alloc::vec::Vec;
use alloc::string::String;
use crate::fs::{File, Stdin, Stdout};
use core::cell::RefMut;
pub struct ProcessControlBlock {
// immutable
@@ -30,10 +27,12 @@ pub struct ProcessControlBlockInner {
pub children: Vec<Arc<ProcessControlBlock>>,
pub exit_code: i32,
pub fd_table: Vec<Option<Arc<dyn File + Send + Sync>>>,
pub signals: SignalFlags,
pub tasks: Vec<Option<Arc<TaskControlBlock>>>,
pub task_res_allocator: RecycleAllocator,
pub mutex_list: Vec<Option<Arc<dyn Mutex>>>,
pub semaphore_list: Vec<Option<Arc<Semaphore>>>,
pub condvar_list: Vec<Option<Arc<Condvar>>>,
}
impl ProcessControlBlockInner {
@@ -43,8 +42,7 @@ impl ProcessControlBlockInner {
}
pub fn alloc_fd(&mut self) -> usize {
if let Some(fd) = (0..self.fd_table.len())
.find(|fd| self.fd_table[*fd].is_none()) {
if let Some(fd) = (0..self.fd_table.len()).find(|fd| self.fd_table[*fd].is_none()) {
fd
} else {
self.fd_table.push(None);
@@ -56,7 +54,7 @@ impl ProcessControlBlockInner {
self.task_res_allocator.alloc()
}
pub fn dealloc_tid(&mut self, tid: usize){
pub fn dealloc_tid(&mut self, tid: usize) {
self.task_res_allocator.dealloc(tid)
}
@@ -81,25 +79,29 @@ impl ProcessControlBlock {
let pid_handle = pid_alloc();
let process = Arc::new(Self {
pid: pid_handle,
inner: unsafe { UPSafeCell::new(ProcessControlBlockInner {
is_zombie: false,
memory_set,
parent: None,
children: Vec::new(),
exit_code: 0,
fd_table: vec![
// 0 -> stdin
Some(Arc::new(Stdin)),
// 1 -> stdout
Some(Arc::new(Stdout)),
// 2 -> stderr
Some(Arc::new(Stdout)),
],
tasks: Vec::new(),
task_res_allocator: RecycleAllocator::new(),
mutex_list: Vec::new(),
semaphore_list: Vec::new(),
})}
inner: unsafe {
UPSafeCell::new(ProcessControlBlockInner {
is_zombie: false,
memory_set,
parent: None,
children: Vec::new(),
exit_code: 0,
fd_table: vec![
// 0 -> stdin
Some(Arc::new(Stdin)),
// 1 -> stdout
Some(Arc::new(Stdout)),
// 2 -> stderr
Some(Arc::new(Stdout)),
],
signals: SignalFlags::empty(),
tasks: Vec::new(),
task_res_allocator: RecycleAllocator::new(),
mutex_list: Vec::new(),
semaphore_list: Vec::new(),
condvar_list: Vec::new(),
})
},
});
// create a main thread, we should allocate ustack and trap_cx here
let task = Arc::new(TaskControlBlock::new(
@@ -124,6 +126,7 @@ impl ProcessControlBlock {
let mut process_inner = process.inner_exclusive_access();
process_inner.tasks.push(Some(Arc::clone(&task)));
drop(process_inner);
insert_into_pid2process(process.getpid(), Arc::clone(&process));
// add main thread to scheduler
add_task(task);
process
@@ -152,7 +155,7 @@ impl ProcessControlBlock {
.map(|arg| {
translated_refmut(
new_token,
(argv_base + arg * core::mem::size_of::<usize>()) as *mut usize
(argv_base + arg * core::mem::size_of::<usize>()) as *mut usize,
)
})
.collect();
@@ -199,28 +202,38 @@ impl ProcessControlBlock {
new_fd_table.push(None);
}
}
// create child process pcb
let child = Arc::new(Self {
pid,
inner: unsafe { UPSafeCell::new(ProcessControlBlockInner {
is_zombie: false,
memory_set,
parent: Some(Arc::downgrade(self)),
children: Vec::new(),
exit_code: 0,
fd_table: new_fd_table,
tasks: Vec::new(),
task_res_allocator: RecycleAllocator::new(),
mutex_list: Vec::new(),
semaphore_list: Vec::new(),
})}
inner: unsafe {
UPSafeCell::new(ProcessControlBlockInner {
is_zombie: false,
memory_set,
parent: Some(Arc::downgrade(self)),
children: Vec::new(),
exit_code: 0,
fd_table: new_fd_table,
signals: SignalFlags::empty(),
tasks: Vec::new(),
task_res_allocator: RecycleAllocator::new(),
mutex_list: Vec::new(),
semaphore_list: Vec::new(),
condvar_list: Vec::new(),
})
},
});
// add child
parent.children.push(Arc::clone(&child));
// create main thread of child process
let task = Arc::new(TaskControlBlock::new(
Arc::clone(&child),
parent.get_task(0).inner_exclusive_access().res.as_ref().unwrap().ustack_base(),
parent
.get_task(0)
.inner_exclusive_access()
.res
.as_ref()
.unwrap()
.ustack_base(),
// here we do not allocate trap_cx or ustack again
// note, however, that we do allocate a new kstack here
false,
@@ -234,6 +247,7 @@ impl ProcessControlBlock {
let trap_cx = task_inner.get_trap_cx();
trap_cx.kernel_sp = task.kstack.get_top();
drop(task_inner);
insert_into_pid2process(child.getpid(), Arc::clone(&child));
// add this thread to scheduler
add_task(task);
child
@@ -243,4 +257,3 @@ impl ProcessControlBlock {
self.pid.0
}
}

View file

@@ -1,10 +1,10 @@
use super::{TaskContext, TaskControlBlock, ProcessControlBlock};
use super::__switch;
use super::{fetch_task, TaskStatus};
use super::{ProcessControlBlock, TaskContext, TaskControlBlock};
use crate::sync::UPSafeCell;
use crate::trap::TrapContext;
use alloc::sync::Arc;
use lazy_static::*;
use super::{fetch_task, TaskStatus};
use super::__switch;
use crate::trap::TrapContext;
use crate::sync::UPSafeCell;
pub struct Processor {
current: Option<Arc<TaskControlBlock>>,
@@ -25,14 +25,12 @@ impl Processor {
self.current.take()
}
pub fn current(&self) -> Option<Arc<TaskControlBlock>> {
self.current.as_ref().map(|task| Arc::clone(task))
self.current.as_ref().map(Arc::clone)
}
}
lazy_static! {
pub static ref PROCESSOR: UPSafeCell<Processor> = unsafe {
UPSafeCell::new(Processor::new())
};
pub static ref PROCESSOR: UPSafeCell<Processor> = unsafe { UPSafeCell::new(Processor::new()) };
}
pub fn run_tasks() {
@@ -50,13 +48,10 @@ pub fn run_tasks() {
// release processor manually
drop(processor);
unsafe {
__switch(
idle_task_cx_ptr,
next_task_cx_ptr,
);
__switch(idle_task_cx_ptr, next_task_cx_ptr);
}
} else {
println!("no tasks available in run_tasks");
println!("no tasks available in run_tasks");
}
}
}
@@ -75,12 +70,14 @@ pub fn current_process() -> Arc<ProcessControlBlock> {
pub fn current_user_token() -> usize {
let task = current_task().unwrap();
let token = task.get_user_token();
token
task.get_user_token()
}
pub fn current_trap_cx() -> &'static mut TrapContext {
current_task().unwrap().inner_exclusive_access().get_trap_cx()
current_task()
.unwrap()
.inner_exclusive_access()
.get_trap_cx()
}
pub fn current_trap_cx_user_va() -> usize {
@@ -94,10 +91,7 @@ pub fn current_trap_cx_user_va() -> usize {
}
pub fn current_kstack_top() -> usize {
current_task()
.unwrap()
.kstack
.get_top()
current_task().unwrap().kstack.get_top()
}
pub fn schedule(switched_task_cx_ptr: *mut TaskContext) {
@@ -105,9 +99,6 @@ pub fn schedule(switched_task_cx_ptr: *mut TaskContext) {
let idle_task_cx_ptr = processor.get_idle_task_cx_ptr();
drop(processor);
unsafe {
__switch(
switched_task_cx_ptr,
idle_task_cx_ptr,
);
__switch(switched_task_cx_ptr, idle_task_cx_ptr);
}
}

29
os/src/task/signal.rs Normal file
View file

@@ -0,0 +1,29 @@
use bitflags::*;
bitflags! {
pub struct SignalFlags: u32 {
const SIGINT = 1 << 2;
const SIGILL = 1 << 4;
const SIGABRT = 1 << 6;
const SIGFPE = 1 << 8;
const SIGSEGV = 1 << 11;
}
}
impl SignalFlags {
pub fn check_error(&self) -> Option<(i32, &'static str)> {
if self.contains(Self::SIGINT) {
Some((-2, "Killed, SIGINT=2"))
} else if self.contains(Self::SIGILL) {
Some((-4, "Illegal Instruction, SIGILL=4"))
} else if self.contains(Self::SIGABRT) {
Some((-6, "Aborted, SIGABRT=6"))
} else if self.contains(Self::SIGFPE) {
Some((-8, "Erroneous Arithmetic Operation, SIGFPE=8"))
} else if self.contains(Self::SIGSEGV) {
Some((-11, "Segmentation Fault, SIGSEGV=11"))
} else {
None
}
}
}
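Each flag is 1 << signum using the conventional Linux signal numbers (SIGINT = 2, SIGILL = 4, SIGABRT = 6, SIGFPE = 8, SIGSEGV = 11), and check_error turns the first pending fatal signal into the (-signum, message) pair that the trap handler prints and passes to exit_current_and_run_next. The intended mapping, written as a host-side test sketch (the kernel itself carries no test harness, so this is illustrative only):

#[cfg(test)]
mod tests {
    use super::SignalFlags;

    #[test]
    fn sigsegv_maps_to_its_exit_code() {
        let mut signals = SignalFlags::empty();
        assert!(signals.check_error().is_none());
        signals |= SignalFlags::SIGSEGV;
        assert_eq!(
            signals.check_error(),
            Some((-11, "Segmentation Fault, SIGSEGV=11"))
        );
    }
}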

View file

@@ -1,12 +1,8 @@
use super::TaskContext;
use core::arch::global_asm;
global_asm!(include_str!("switch.S"));
use super::TaskContext;
extern "C" {
pub fn __switch(
current_task_cx_ptr: *mut TaskContext,
next_task_cx_ptr: *const TaskContext
);
pub fn __switch(current_task_cx_ptr: *mut TaskContext, next_task_cx_ptr: *const TaskContext);
}

View file

@@ -1,8 +1,8 @@
use alloc::sync::{Arc, Weak};
use crate::{mm::PhysPageNum, sync::UPSafeCell};
use crate::trap::TrapContext;
use super::id::TaskUserRes;
use super::{KernelStack, ProcessControlBlock, TaskContext, kstack_alloc};
use super::{kstack_alloc, KernelStack, ProcessControlBlock, TaskContext};
use crate::trap::TrapContext;
use crate::{mm::PhysPageNum, sync::UPSafeCell};
use alloc::sync::{Arc, Weak};
use core::cell::RefMut;
pub struct TaskControlBlock {
@@ -37,7 +37,7 @@ impl TaskControlBlockInner {
pub fn get_trap_cx(&self) -> &'static mut TrapContext {
self.trap_cx_ppn.get_mut()
}
#[allow(unused)]
fn get_status(&self) -> TaskStatus {
self.task_status
@@ -48,7 +48,7 @@ impl TaskControlBlock {
pub fn new(
process: Arc<ProcessControlBlock>,
ustack_base: usize,
alloc_user_res: bool
alloc_user_res: bool,
) -> Self {
let res = TaskUserRes::new(Arc::clone(&process), ustack_base, alloc_user_res);
let trap_cx_ppn = res.trap_cx_ppn();
@@ -57,15 +57,15 @@ impl TaskControlBlock {
Self {
process: Arc::downgrade(&process),
kstack,
inner: unsafe { UPSafeCell::new(
TaskControlBlockInner {
inner: unsafe {
UPSafeCell::new(TaskControlBlockInner {
res: Some(res),
trap_cx_ppn,
task_cx: TaskContext::goto_trap_return(kstack_top),
task_status: TaskStatus::Ready,
exit_code: None,
}
)},
})
},
}
}
}

View file

@@ -1,13 +1,13 @@
use core::cmp::Ordering;
use riscv::register::time;
use crate::sbi::set_timer;
use crate::config::CLOCK_FREQ;
use crate::task::{TaskControlBlock, add_task};
use crate::sbi::set_timer;
use crate::sync::UPSafeCell;
use crate::task::{add_task, TaskControlBlock};
use alloc::collections::BinaryHeap;
use alloc::sync::Arc;
use lazy_static::*;
use riscv::register::time;
const TICKS_PER_SEC: usize = 100;
const MSEC_PER_SEC: usize = 1000;
@@ -39,28 +39,24 @@ impl PartialOrd for TimerCondVar {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
let a = -(self.expire_ms as isize);
let b = -(other.expire_ms as isize);
Some(a.cmp(&b))
}
}
impl Ord for TimerCondVar {
fn cmp(&self, other: &Self) -> Ordering {
self.partial_cmp(other).unwrap()
}
}
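alloc::collections::BinaryHeap is a max-heap, so TimerCondVar's Ord negates expire_ms to make the heap pop the earliest deadline first; check_timer below then wakes every expired timer by pushing its task back onto the ready queue. The same earliest-first trick, demonstrated standalone with core::cmp::Reverse, the idiomatic alternative to the manual negation:

use std::cmp::Reverse;
use std::collections::BinaryHeap;

fn main() {
    // BinaryHeap pops the maximum; wrapping the key in Reverse (or negating it,
    // as TimerCondVar does) turns that into earliest-deadline-first.
    let mut timers = BinaryHeap::new();
    for expire_ms in [30usize, 10, 20] {
        timers.push(Reverse(expire_ms));
    }
    assert_eq!(timers.pop(), Some(Reverse(10)));
}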
lazy_static! {
static ref TIMERS: UPSafeCell<BinaryHeap<TimerCondVar>> = unsafe { UPSafeCell::new(
BinaryHeap::<TimerCondVar>::new()
)};
static ref TIMERS: UPSafeCell<BinaryHeap<TimerCondVar>> =
unsafe { UPSafeCell::new(BinaryHeap::<TimerCondVar>::new()) };
}
pub fn add_timer(expire_ms: usize, task: Arc<TaskControlBlock>) {
let mut timers = TIMERS.exclusive_access();
timers.push(TimerCondVar {
expire_ms,
task,
});
timers.push(TimerCondVar { expire_ms, task });
}
pub fn check_timer() {
@@ -68,9 +64,10 @@ pub fn check_timer() {
let mut timers = TIMERS.exclusive_access();
while let Some(timer) = timers.peek() {
if timer.expire_ms <= current_ms {
add_task(Arc::clone(&timer.task));
drop(timer);
add_task(Arc::clone(&timer.task));
timers.pop();
} else { break; }
} else {
break;
}
}
}

View file

@@ -1,4 +1,4 @@
use riscv::register::sstatus::{Sstatus, self, SPP};
use riscv::register::sstatus::{self, Sstatus, SPP};
#[repr(C)]
#[derive(Debug)]
@@ -12,7 +12,9 @@ pub struct TrapContext {
}
impl TrapContext {
pub fn set_sp(&mut self, sp: usize) { self.x[2] = sp; }
pub fn set_sp(&mut self, sp: usize) {
self.x[2] = sp;
}
pub fn app_init_context(
entry: usize,
sp: usize,

View file

@@ -1,28 +1,18 @@
mod context;
use crate::config::TRAMPOLINE;
use crate::syscall::syscall;
use crate::task::{
check_signals_of_current, current_add_signal, current_trap_cx, current_trap_cx_user_va,
current_user_token, exit_current_and_run_next, suspend_current_and_run_next, SignalFlags,
};
use crate::timer::{check_timer, set_next_trigger};
use core::arch::{asm, global_asm};
use riscv::register::{
mtvec::TrapMode,
stvec,
scause::{
self,
Trap,
Exception,
Interrupt,
},
stval,
sie,
scause::{self, Exception, Interrupt, Trap},
sie, stval, stvec,
};
use crate::syscall::syscall;
use crate::task::{
exit_current_and_run_next,
suspend_current_and_run_next,
current_user_token,
current_trap_cx,
current_trap_cx_user_va,
};
use crate::timer::{set_next_trigger, check_timer};
use crate::config::TRAMPOLINE;
global_asm!(include_str!("trap.S"));
@@ -43,7 +33,9 @@ fn set_user_trap_entry() {
}
pub fn enable_timer_interrupt() {
unsafe { sie::set_stimer(); }
unsafe {
sie::set_stimer();
}
}
#[no_mangle]
@@ -62,25 +54,24 @@ pub fn trap_handler() -> ! {
cx = current_trap_cx();
cx.x[10] = result as usize;
}
Trap::Exception(Exception::StoreFault) |
Trap::Exception(Exception::StorePageFault) |
Trap::Exception(Exception::InstructionFault) |
Trap::Exception(Exception::InstructionPageFault) |
Trap::Exception(Exception::LoadFault) |
Trap::Exception(Exception::LoadPageFault) => {
Trap::Exception(Exception::StoreFault)
| Trap::Exception(Exception::StorePageFault)
| Trap::Exception(Exception::InstructionFault)
| Trap::Exception(Exception::InstructionPageFault)
| Trap::Exception(Exception::LoadFault)
| Trap::Exception(Exception::LoadPageFault) => {
/*
println!(
"[kernel] {:?} in application, bad addr = {:#x}, bad instruction = {:#x}, core dumped.",
"[kernel] {:?} in application, bad addr = {:#x}, bad instruction = {:#x}, kernel killed it.",
scause.cause(),
stval,
current_trap_cx().sepc,
);
// page fault exit code
exit_current_and_run_next(-2);
*/
current_add_signal(SignalFlags::SIGSEGV);
}
Trap::Exception(Exception::IllegalInstruction) => {
println!("[kernel] IllegalInstruction in application, core dumped.");
// illegal instruction exit code
exit_current_and_run_next(-3);
current_add_signal(SignalFlags::SIGILL);
}
Trap::Interrupt(Interrupt::SupervisorTimer) => {
set_next_trigger();
@@ -88,10 +79,18 @@ pub fn trap_handler() -> ! {
suspend_current_and_run_next();
}
_ => {
panic!("Unsupported trap {:?}, stval = {:#x}!", scause.cause(), stval);
panic!(
"Unsupported trap {:?}, stval = {:#x}!",
scause.cause(),
stval
);
}
}
//println!("before trap_return");
// check signals
if let Some((errno, msg)) = check_signals_of_current() {
println!("[kernel] {}", msg);
exit_current_and_run_next(errno);
}
trap_return();
}
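With this change the exception arms no longer kill the offending task on the spot: memory faults raise SIGSEGV and illegal instructions raise SIGILL on the current process, and the common path just before trap_return converts any pending fatal signal into a kernel log line plus exit_current_and_run_next with the mapped exit code. A hypothetical user program that exercises the new path (not part of this commit, and assuming the user crate's usual no_std scaffolding) would simply touch an unmapped address; the kernel then prints "[kernel] Segmentation Fault, SIGSEGV=11" and exits it with code -11:

#![no_std]
#![no_main]

extern crate user_lib; // assumed user runtime crate providing _start and the exit call

#[no_mangle]
fn main() -> i32 {
    // Address 0 is unmapped in user space, so this store page-faults;
    // the kernel records SIGSEGV instead of terminating inside the fault arm.
    unsafe {
        core::ptr::null_mut::<u8>().write_volatile(0);
    }
    0
}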