Use `exclusive_session` to eliminate some explicit `drop` calls.

This commit is contained in:
Yifan Wu 2022-03-10 16:41:06 -08:00
parent ba611a1458
commit fb196d35a9
8 changed files with 21 additions and 54 deletions

View file

@ -1,6 +1,5 @@
use crate::drivers::chardev::{CharDevice, UART}; use crate::drivers::chardev::{CharDevice, UART};
use core::fmt::{self, Write}; use core::fmt::{self, Write};
use crate::sbi::console_putchar;
struct Stdout; struct Stdout;
@ -8,7 +7,6 @@ impl Write for Stdout {
fn write_str(&mut self, s: &str) -> fmt::Result { fn write_str(&mut self, s: &str) -> fmt::Result {
for c in s.chars() { for c in s.chars() {
UART.write(c as u8); UART.write(c as u8);
//console_putchar(c as usize);
} }
Ok(()) Ok(())
} }

View file

@ -28,17 +28,10 @@ impl BlockDevice for VirtIOBlock {
let nb = *DEV_NON_BLOCKING_ACCESS.exclusive_access(); let nb = *DEV_NON_BLOCKING_ACCESS.exclusive_access();
if nb { if nb {
let mut resp = BlkResp::default(); let mut resp = BlkResp::default();
/*
let task_cx_ptr = self.virtio_blk.exclusive_session(|blk| { let task_cx_ptr = self.virtio_blk.exclusive_session(|blk| {
let token = unsafe { blk.read_block_nb(block_id, buf, &mut resp).unwrap() }; let token = unsafe { blk.read_block_nb(block_id, buf, &mut resp).unwrap() };
self.condvars.get(&token).unwrap().wait_no_sched() self.condvars.get(&token).unwrap().wait_no_sched()
}); });
*/
let mut blk = self.virtio_blk.exclusive_access();
let token = unsafe { blk.read_block_nb(block_id, buf, &mut resp).unwrap() };
//println!("waiting on token {}", token);
let task_cx_ptr = self.condvars.get(&token).unwrap().wait_no_sched();
drop(blk);
schedule(task_cx_ptr); schedule(task_cx_ptr);
assert_eq!( assert_eq!(
resp.status(), resp.status(),
@ -56,16 +49,10 @@ impl BlockDevice for VirtIOBlock {
let nb = *DEV_NON_BLOCKING_ACCESS.exclusive_access(); let nb = *DEV_NON_BLOCKING_ACCESS.exclusive_access();
if nb { if nb {
let mut resp = BlkResp::default(); let mut resp = BlkResp::default();
/*
let task_cx_ptr = self.virtio_blk.exclusive_session(|blk| { let task_cx_ptr = self.virtio_blk.exclusive_session(|blk| {
let token = unsafe { blk.write_block_nb(block_id, buf, &mut resp).unwrap() }; let token = unsafe { blk.write_block_nb(block_id, buf, &mut resp).unwrap() };
self.condvars.get(&token).unwrap().wait_no_sched() self.condvars.get(&token).unwrap().wait_no_sched()
}); });
*/
let mut blk = self.virtio_blk.exclusive_access();
let token = unsafe { blk.write_block_nb(block_id, buf, &mut resp).unwrap() };
let task_cx_ptr = self.condvars.get(&token).unwrap().wait_no_sched();
drop(blk);
schedule(task_cx_ptr); schedule(task_cx_ptr);
assert_eq!( assert_eq!(
resp.status(), resp.status(),
@ -80,21 +67,11 @@ impl BlockDevice for VirtIOBlock {
} }
} }
fn handle_irq(&self) { fn handle_irq(&self) {
//println!("into handle_irq");
/*
self.virtio_blk.exclusive_session(|blk| { self.virtio_blk.exclusive_session(|blk| {
//println!("not panic here");
while let Ok(token) = blk.pop_used() { while let Ok(token) = blk.pop_used() {
//println!("wakeup virtio.token {}", token);
self.condvars.get(&token).unwrap().signal(); self.condvars.get(&token).unwrap().signal();
} }
}); });
*/
let mut blk = self.virtio_blk.exclusive_access();
while let Ok(token) = blk.pop_used() {
//println!("wakeup virtio.token {}", token);
self.condvars.get(&token).unwrap().signal();
}
} }
} }

View file

@ -146,7 +146,6 @@ impl<const BASE_ADDR: usize> NS16550a<BASE_ADDR> {
impl<const BASE_ADDR: usize> CharDevice for NS16550a<BASE_ADDR> { impl<const BASE_ADDR: usize> CharDevice for NS16550a<BASE_ADDR> {
fn read(&self) -> u8 { fn read(&self) -> u8 {
//println!("NS16550a::read");
loop { loop {
let mut inner = self.inner.exclusive_access(); let mut inner = self.inner.exclusive_access();
if let Some(ch) = inner.read_buffer.pop_front() { if let Some(ch) = inner.read_buffer.pop_front() {
@ -154,7 +153,6 @@ impl<const BASE_ADDR: usize> CharDevice for NS16550a<BASE_ADDR> {
} else { } else {
let task_cx_ptr = self.condvar.wait_no_sched(); let task_cx_ptr = self.condvar.wait_no_sched();
drop(inner); drop(inner);
//println!("before scheduling");
schedule(task_cx_ptr); schedule(task_cx_ptr);
} }
} }
@ -164,15 +162,13 @@ impl<const BASE_ADDR: usize> CharDevice for NS16550a<BASE_ADDR> {
inner.ns16550a.write(ch); inner.ns16550a.write(ch);
} }
fn handle_irq(&self) { fn handle_irq(&self) {
let mut inner = self.inner.exclusive_access();
let mut count = 0; let mut count = 0;
self.inner.exclusive_session(|inner| {
while let Some(ch) = inner.ns16550a.read() { while let Some(ch) = inner.ns16550a.read() {
//println!("got {}", ch as char);
count += 1; count += 1;
inner.read_buffer.push_back(ch); inner.read_buffer.push_back(ch);
} }
drop(inner); });
//assert_eq!(count, 1);
if count > 0 { if count > 0 {
self.condvar.signal(); self.condvar.signal();
} }

View file

@ -62,6 +62,7 @@ impl PLIC {
self.priority_ptr(intr_source_id).write_volatile(priority); self.priority_ptr(intr_source_id).write_volatile(priority);
} }
} }
#[allow(unused)]
pub fn get_priority(&mut self, intr_source_id: usize) -> u32 { pub fn get_priority(&mut self, intr_source_id: usize) -> u32 {
unsafe { self.priority_ptr(intr_source_id).read_volatile() & 7 } unsafe { self.priority_ptr(intr_source_id).read_volatile() & 7 }
} }
@ -100,6 +101,7 @@ impl PLIC {
threshold_ptr.write_volatile(threshold); threshold_ptr.write_volatile(threshold);
} }
} }
#[allow(unused)]
pub fn get_threshold(&mut self, hart_id: usize, target_priority: IntrTargetPriority) -> u32 { pub fn get_threshold(&mut self, hart_id: usize, target_priority: IntrTargetPriority) -> u32 {
let threshold_ptr = self.threshold_ptr_of_hart_with_priority(hart_id, target_priority); let threshold_ptr = self.threshold_ptr_of_hart_with_priority(hart_id, target_priority);
unsafe { threshold_ptr.read_volatile() & 7 } unsafe { threshold_ptr.read_volatile() & 7 }

View file

@ -28,30 +28,27 @@ impl Condvar {
} }
} }
/*
pub fn wait(&self) { pub fn wait(&self) {
let mut inner = self.inner.exclusive_access(); let mut inner = self.inner.exclusive_access();
inner.wait_queue.push_back(current_task().unwrap()); inner.wait_queue.push_back(current_task().unwrap());
drop(inner); drop(inner);
block_current_and_run_next(); block_current_and_run_next();
} }
*/
pub fn wait_no_sched(&self) -> *mut TaskContext { pub fn wait_no_sched(&self) -> *mut TaskContext {
/*
self.inner.exclusive_session(|inner| { self.inner.exclusive_session(|inner| {
inner.wait_queue.push_back(current_task().unwrap()); inner.wait_queue.push_back(current_task().unwrap());
}); });
*/
let mut inner = self.inner.exclusive_access();
inner.wait_queue.push_back(current_task().unwrap());
drop(inner);
block_current_task() block_current_task()
} }
pub fn wait_with_mutex(&self, mutex: Arc<dyn Mutex>) { pub fn wait_with_mutex(&self, mutex: Arc<dyn Mutex>) {
mutex.unlock(); mutex.unlock();
let mut inner = self.inner.exclusive_access(); self.inner.exclusive_session(|inner| {
inner.wait_queue.push_back(current_task().unwrap()); inner.wait_queue.push_back(current_task().unwrap());
drop(inner); });
block_current_and_run_next(); block_current_and_run_next();
mutex.lock(); mutex.lock();
} }

View file

@ -107,12 +107,10 @@ impl<T> UPIntrFreeCell<T> {
UPIntrRefMut(Some(self.inner.borrow_mut())) UPIntrRefMut(Some(self.inner.borrow_mut()))
} }
/*
pub fn exclusive_session<F, V>(&self, f: F) -> V where F: FnOnce(&mut T) -> V { pub fn exclusive_session<F, V>(&self, f: F) -> V where F: FnOnce(&mut T) -> V {
let mut inner = self.exclusive_access(); let mut inner = self.exclusive_access();
f(inner.deref_mut()) f(inner.deref_mut())
} }
*/
} }
impl<'a, T> Drop for UPIntrRefMut<'a, T> { impl<'a, T> Drop for UPIntrRefMut<'a, T> {

View file

@ -39,11 +39,10 @@ pub fn run_tasks() {
if let Some(task) = fetch_task() { if let Some(task) = fetch_task() {
let idle_task_cx_ptr = processor.get_idle_task_cx_ptr(); let idle_task_cx_ptr = processor.get_idle_task_cx_ptr();
// access coming task TCB exclusively // access coming task TCB exclusively
let mut task_inner = task.inner_exclusive_access(); let next_task_cx_ptr = task.inner.exclusive_session(|task_inner| {
let next_task_cx_ptr = &task_inner.task_cx as *const TaskContext;
task_inner.task_status = TaskStatus::Running; task_inner.task_status = TaskStatus::Running;
drop(task_inner); &task_inner.task_cx as *const TaskContext
// release coming task TCB manually });
processor.current = Some(task); processor.current = Some(task);
// release processor manually // release processor manually
drop(processor); drop(processor);
@ -95,9 +94,9 @@ pub fn current_kstack_top() -> usize {
} }
pub fn schedule(switched_task_cx_ptr: *mut TaskContext) { pub fn schedule(switched_task_cx_ptr: *mut TaskContext) {
let mut processor = PROCESSOR.exclusive_access(); let idle_task_cx_ptr = PROCESSOR.exclusive_session(|processor| {
let idle_task_cx_ptr = processor.get_idle_task_cx_ptr(); processor.get_idle_task_cx_ptr()
drop(processor); });
unsafe { unsafe {
__switch(switched_task_cx_ptr, idle_task_cx_ptr); __switch(switched_task_cx_ptr, idle_task_cx_ptr);
} }

View file

@ -9,7 +9,7 @@ pub struct TaskControlBlock {
pub process: Weak<ProcessControlBlock>, pub process: Weak<ProcessControlBlock>,
pub kstack: KernelStack, pub kstack: KernelStack,
// mutable // mutable
inner: UPIntrFreeCell<TaskControlBlockInner>, pub inner: UPIntrFreeCell<TaskControlBlockInner>,
} }
impl TaskControlBlock { impl TaskControlBlock {