use exclusive_session to eliminate some explicit drops.
parent ba611a1458
commit fb196d35a9
8 changed files with 21 additions and 54 deletions
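The pattern behind this commit, in one sketch: instead of taking an exclusive borrow with exclusive_access() and then remembering to drop() the guard before blocking or re-borrowing, exclusive_session runs a closure against the borrowed value and releases the borrow as soon as the closure returns (the helper itself is uncommented in the UPIntrFreeCell hunk below). The following is a minimal, hosted approximation; SessionCell is a hypothetical stand-in for the kernel's UPIntrFreeCell, built on a plain RefCell rather than an interrupt-masking cell.

use std::cell::{RefCell, RefMut};

// Hypothetical stand-in for UPIntrFreeCell: a single-core "exclusive" cell.
struct SessionCell<T> {
    inner: RefCell<T>,
}

impl<T> SessionCell<T> {
    fn new(value: T) -> Self {
        Self { inner: RefCell::new(value) }
    }

    // Explicit-borrow style: the caller holds a guard and must drop it
    // before anything that might re-borrow the cell (in the kernel, e.g. before schedule()).
    fn exclusive_access(&self) -> RefMut<'_, T> {
        self.inner.borrow_mut()
    }

    // Scoped style: the borrow lives exactly as long as the closure,
    // so no explicit drop is needed at the call site.
    fn exclusive_session<F, V>(&self, f: F) -> V
    where
        F: FnOnce(&mut T) -> V,
    {
        let mut guard = self.exclusive_access();
        f(&mut *guard)
    }
}

fn main() {
    let queue: SessionCell<Vec<u8>> = SessionCell::new(Vec::new());

    // Before: borrow, mutate, and explicitly drop the guard.
    let mut q = queue.exclusive_access();
    q.push(1);
    drop(q);

    // After: the guard is released automatically when the closure returns.
    let len = queue.exclusive_session(|q| {
        q.push(2);
        q.len()
    });
    assert_eq!(len, 2);
}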
@@ -1,6 +1,5 @@
use crate::drivers::chardev::{CharDevice, UART};
use core::fmt::{self, Write};
use crate::sbi::console_putchar;

struct Stdout;

@@ -8,7 +7,6 @@ impl Write for Stdout {
    fn write_str(&mut self, s: &str) -> fmt::Result {
        for c in s.chars() {
            UART.write(c as u8);
            //console_putchar(c as usize);
        }
        Ok(())
    }
@@ -28,17 +28,10 @@ impl BlockDevice for VirtIOBlock {
        let nb = *DEV_NON_BLOCKING_ACCESS.exclusive_access();
        if nb {
            let mut resp = BlkResp::default();
            /*
            let task_cx_ptr = self.virtio_blk.exclusive_session(|blk| {
                let token = unsafe { blk.read_block_nb(block_id, buf, &mut resp).unwrap() };
                self.condvars.get(&token).unwrap().wait_no_sched()
            });
            */
            let mut blk = self.virtio_blk.exclusive_access();
            let token = unsafe { blk.read_block_nb(block_id, buf, &mut resp).unwrap() };
            //println!("waiting on token {}", token);
            let task_cx_ptr = self.condvars.get(&token).unwrap().wait_no_sched();
            drop(blk);
            schedule(task_cx_ptr);
            assert_eq!(
                resp.status(),
@@ -56,16 +49,10 @@ impl BlockDevice for VirtIOBlock {
        let nb = *DEV_NON_BLOCKING_ACCESS.exclusive_access();
        if nb {
            let mut resp = BlkResp::default();
            /*
            let task_cx_ptr = self.virtio_blk.exclusive_session(|blk| {
                let token = unsafe { blk.write_block_nb(block_id, buf, &mut resp).unwrap() };
                self.condvars.get(&token).unwrap().wait_no_sched()
            });
            */
            let mut blk = self.virtio_blk.exclusive_access();
            let token = unsafe { blk.write_block_nb(block_id, buf, &mut resp).unwrap() };
            let task_cx_ptr = self.condvars.get(&token).unwrap().wait_no_sched();
            drop(blk);
            schedule(task_cx_ptr);
            assert_eq!(
                resp.status(),
@@ -80,21 +67,11 @@ impl BlockDevice for VirtIOBlock {
        }
    }
    fn handle_irq(&self) {
        //println!("into handle_irq");
        /*
        self.virtio_blk.exclusive_session(|blk| {
            //println!("not panic here");
            while let Ok(token) = blk.pop_used() {
                //println!("wakeup virtio.token {}", token);
                self.condvars.get(&token).unwrap().signal();
            }
        });
        */
        let mut blk = self.virtio_blk.exclusive_access();
        while let Ok(token) = blk.pop_used() {
            //println!("wakeup virtio.token {}", token);
            self.condvars.get(&token).unwrap().signal();
        }
    }
}

@@ -146,7 +146,6 @@ impl<const BASE_ADDR: usize> NS16550a<BASE_ADDR> {

impl<const BASE_ADDR: usize> CharDevice for NS16550a<BASE_ADDR> {
    fn read(&self) -> u8 {
        //println!("NS16550a::read");
        loop {
            let mut inner = self.inner.exclusive_access();
            if let Some(ch) = inner.read_buffer.pop_front() {
@@ -154,7 +153,6 @@ impl<const BASE_ADDR: usize> CharDevice for NS16550a<BASE_ADDR> {
            } else {
                let task_cx_ptr = self.condvar.wait_no_sched();
                drop(inner);
                //println!("before scheduling");
                schedule(task_cx_ptr);
            }
        }
@@ -164,15 +162,13 @@ impl<const BASE_ADDR: usize> CharDevice for NS16550a<BASE_ADDR> {
        inner.ns16550a.write(ch);
    }
    fn handle_irq(&self) {
        let mut inner = self.inner.exclusive_access();
        let mut count = 0;
        while let Some(ch) = inner.ns16550a.read() {
            //println!("got {}", ch as char);
            count += 1;
            inner.read_buffer.push_back(ch);
        }
        drop(inner);
        //assert_eq!(count, 1);
        self.inner.exclusive_session(|inner| {
            while let Some(ch) = inner.ns16550a.read() {
                count += 1;
                inner.read_buffer.push_back(ch);
            }
        });
        if count > 0 {
            self.condvar.signal();
        }

@@ -62,6 +62,7 @@ impl PLIC {
            self.priority_ptr(intr_source_id).write_volatile(priority);
        }
    }
    #[allow(unused)]
    pub fn get_priority(&mut self, intr_source_id: usize) -> u32 {
        unsafe { self.priority_ptr(intr_source_id).read_volatile() & 7 }
    }
@@ -100,6 +101,7 @@ impl PLIC {
            threshold_ptr.write_volatile(threshold);
        }
    }
    #[allow(unused)]
    pub fn get_threshold(&mut self, hart_id: usize, target_priority: IntrTargetPriority) -> u32 {
        let threshold_ptr = self.threshold_ptr_of_hart_with_priority(hart_id, target_priority);
        unsafe { threshold_ptr.read_volatile() & 7 }

@@ -28,30 +28,27 @@ impl Condvar {
        }
    }

    /*
    pub fn wait(&self) {
        let mut inner = self.inner.exclusive_access();
        inner.wait_queue.push_back(current_task().unwrap());
        drop(inner);
        block_current_and_run_next();
    }
    */

    pub fn wait_no_sched(&self) -> *mut TaskContext {
        /*
        self.inner.exclusive_session(|inner| {
            inner.wait_queue.push_back(current_task().unwrap());
        });
        */
        let mut inner = self.inner.exclusive_access();
        inner.wait_queue.push_back(current_task().unwrap());
        drop(inner);
        block_current_task()
    }

    pub fn wait_with_mutex(&self, mutex: Arc<dyn Mutex>) {
        mutex.unlock();
        let mut inner = self.inner.exclusive_access();
        inner.wait_queue.push_back(current_task().unwrap());
        drop(inner);
        self.inner.exclusive_session(|inner| {
            inner.wait_queue.push_back(current_task().unwrap());
        });
        block_current_and_run_next();
        mutex.lock();
    }

@@ -107,12 +107,10 @@ impl<T> UPIntrFreeCell<T> {
        UPIntrRefMut(Some(self.inner.borrow_mut()))
    }

    /*
    pub fn exclusive_session<F, V>(&self, f: F) -> V where F: FnOnce(&mut T) -> V {
        let mut inner = self.exclusive_access();
        f(inner.deref_mut())
    }
    */
}

impl<'a, T> Drop for UPIntrRefMut<'a, T> {

@@ -39,11 +39,10 @@ pub fn run_tasks() {
        if let Some(task) = fetch_task() {
            let idle_task_cx_ptr = processor.get_idle_task_cx_ptr();
            // access coming task TCB exclusively
            let mut task_inner = task.inner_exclusive_access();
            let next_task_cx_ptr = &task_inner.task_cx as *const TaskContext;
            task_inner.task_status = TaskStatus::Running;
            drop(task_inner);
            // release coming task TCB manually
            let next_task_cx_ptr = task.inner.exclusive_session(|task_inner| {
                task_inner.task_status = TaskStatus::Running;
                &task_inner.task_cx as *const TaskContext
            });
            processor.current = Some(task);
            // release processor manually
            drop(processor);
@@ -95,9 +94,9 @@ pub fn current_kstack_top() -> usize {
}

pub fn schedule(switched_task_cx_ptr: *mut TaskContext) {
    let mut processor = PROCESSOR.exclusive_access();
    let idle_task_cx_ptr = processor.get_idle_task_cx_ptr();
    drop(processor);
    let idle_task_cx_ptr = PROCESSOR.exclusive_session(|processor| {
        processor.get_idle_task_cx_ptr()
    });
    unsafe {
        __switch(switched_task_cx_ptr, idle_task_cx_ptr);
    }

@@ -9,7 +9,7 @@ pub struct TaskControlBlock {
    pub process: Weak<ProcessControlBlock>,
    pub kstack: KernelStack,
    // mutable
    inner: UPIntrFreeCell<TaskControlBlockInner>,
    pub inner: UPIntrFreeCell<TaskControlBlockInner>,
}

impl TaskControlBlock {