Switched to the original ch7 code.

Yifan Wu 2022-01-18 02:48:50 -08:00
parent 4fdd55e2e8
commit 57f7debbc6
50 changed files with 2786 additions and 128 deletions

easy-fs/.gitignore (new vendored file, 3 lines)

@@ -0,0 +1,3 @@
.idea/
target/
Cargo.lock

easy-fs/Cargo.toml (new file, 11 lines)

@@ -0,0 +1,11 @@
[package]
name = "easy-fs"
version = "0.1.0"
authors = ["Yifan Wu <shinbokuow@163.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
spin = "0.7.0"
lazy_static = { version = "1.4.0", features = ["spin_no_std"] }

easy-fs/src/bitmap.rs (new file, 73 lines)

@@ -0,0 +1,73 @@
use alloc::sync::Arc;
use super::{
BlockDevice,
BLOCK_SZ,
get_block_cache,
};
type BitmapBlock = [u64; 64];
const BLOCK_BITS: usize = BLOCK_SZ * 8;
pub struct Bitmap {
start_block_id: usize,
blocks: usize,
}
/// Return (block_pos, bits64_pos, inner_pos)
fn decomposition(mut bit: usize) -> (usize, usize, usize) {
let block_pos = bit / BLOCK_BITS;
bit = bit % BLOCK_BITS;
(block_pos, bit / 64, bit % 64)
}
impl Bitmap {
pub fn new(start_block_id: usize, blocks: usize) -> Self {
Self {
start_block_id,
blocks,
}
}
pub fn alloc(&self, block_device: &Arc<dyn BlockDevice>) -> Option<usize> {
for block_id in 0..self.blocks {
let pos = get_block_cache(
block_id + self.start_block_id,
Arc::clone(block_device),
).lock().modify(0, |bitmap_block: &mut BitmapBlock| {
if let Some((bits64_pos, inner_pos)) = bitmap_block
.iter()
.enumerate()
.find(|(_, bits64)| **bits64 != u64::MAX)
.map(|(bits64_pos, bits64)| {
(bits64_pos, bits64.trailing_ones() as usize)
}) {
// modify cache
bitmap_block[bits64_pos] |= 1u64 << inner_pos;
Some(block_id * BLOCK_BITS + bits64_pos * 64 + inner_pos)
} else {
None
}
});
if pos.is_some() {
return pos;
}
}
None
}
pub fn dealloc(&self, block_device: &Arc<dyn BlockDevice>, bit: usize) {
let (block_pos, bits64_pos, inner_pos) = decomposition(bit);
get_block_cache(
block_pos + self.start_block_id,
Arc::clone(block_device)
).lock().modify(0, |bitmap_block: &mut BitmapBlock| {
assert!(bitmap_block[bits64_pos] & (1u64 << inner_pos) > 0);
bitmap_block[bits64_pos] -= 1u64 << inner_pos;
});
}
pub fn maximum(&self) -> usize {
self.blocks * BLOCK_BITS
}
}
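
For orientation: with BLOCK_SZ = 512, each bitmap block covers 4096 bits, so a global bit index splits into (bitmap block, u64 group, bit within the u64) exactly as decomposition does, and alloc recombines the three parts the same way. A small standalone check of that arithmetic (a sketch only; the constants are restated locally rather than imported from the crate):

const BLOCK_SZ: usize = 512;
const BLOCK_BITS: usize = BLOCK_SZ * 8; // 4096 bits per bitmap block

// Mirrors Bitmap::decomposition: global bit -> (block_pos, bits64_pos, inner_pos).
fn decomposition(bit: usize) -> (usize, usize, usize) {
    (bit / BLOCK_BITS, (bit % BLOCK_BITS) / 64, (bit % BLOCK_BITS) % 64)
}

fn main() {
    // bit 5000 lives in the second bitmap block, u64 group 14, bit 8
    assert_eq!(decomposition(5000), (1, 14, 8));
    // the inverse used by alloc(): block_pos * BLOCK_BITS + bits64_pos * 64 + inner_pos
    assert_eq!(1 * BLOCK_BITS + 14 * 64 + 8, 5000);
    println!("bitmap index arithmetic checks out");
}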

easy-fs/src/block_cache.rs (new file, 135 lines)

@@ -0,0 +1,135 @@
use super::{
BLOCK_SZ,
BlockDevice,
};
use alloc::collections::VecDeque;
use alloc::sync::Arc;
use lazy_static::*;
use spin::Mutex;
pub struct BlockCache {
cache: [u8; BLOCK_SZ],
block_id: usize,
block_device: Arc<dyn BlockDevice>,
modified: bool,
}
impl BlockCache {
/// Load a new BlockCache from disk.
pub fn new(
block_id: usize,
block_device: Arc<dyn BlockDevice>
) -> Self {
let mut cache = [0u8; BLOCK_SZ];
block_device.read_block(block_id, &mut cache);
Self {
cache,
block_id,
block_device,
modified: false,
}
}
fn addr_of_offset(&self, offset: usize) -> usize {
&self.cache[offset] as *const _ as usize
}
pub fn get_ref<T>(&self, offset: usize) -> &T where T: Sized {
let type_size = core::mem::size_of::<T>();
assert!(offset + type_size <= BLOCK_SZ);
let addr = self.addr_of_offset(offset);
unsafe { &*(addr as *const T) }
}
pub fn get_mut<T>(&mut self, offset: usize) -> &mut T where T: Sized {
let type_size = core::mem::size_of::<T>();
assert!(offset + type_size <= BLOCK_SZ);
self.modified = true;
let addr = self.addr_of_offset(offset);
unsafe { &mut *(addr as *mut T) }
}
pub fn read<T, V>(&self, offset: usize, f: impl FnOnce(&T) -> V) -> V {
f(self.get_ref(offset))
}
pub fn modify<T, V>(&mut self, offset: usize, f: impl FnOnce(&mut T) -> V) -> V {
f(self.get_mut(offset))
}
pub fn sync(&mut self) {
if self.modified {
self.modified = false;
self.block_device.write_block(self.block_id, &self.cache);
}
}
}
impl Drop for BlockCache {
fn drop(&mut self) {
self.sync()
}
}
const BLOCK_CACHE_SIZE: usize = 16;
pub struct BlockCacheManager {
queue: VecDeque<(usize, Arc<Mutex<BlockCache>>)>,
}
impl BlockCacheManager {
pub fn new() -> Self {
Self { queue: VecDeque::new() }
}
pub fn get_block_cache(
&mut self,
block_id: usize,
block_device: Arc<dyn BlockDevice>,
) -> Arc<Mutex<BlockCache>> {
if let Some(pair) = self.queue
.iter()
.find(|pair| pair.0 == block_id) {
Arc::clone(&pair.1)
} else {
// substitute
if self.queue.len() == BLOCK_CACHE_SIZE {
// scan from front to back and evict a block no longer referenced elsewhere
if let Some((idx, _)) = self.queue
.iter()
.enumerate()
.find(|(_, pair)| Arc::strong_count(&pair.1) == 1) {
self.queue.drain(idx..=idx);
} else {
panic!("Run out of BlockCache!");
}
}
// load block into mem and push back
let block_cache = Arc::new(Mutex::new(
BlockCache::new(block_id, Arc::clone(&block_device))
));
self.queue.push_back((block_id, Arc::clone(&block_cache)));
block_cache
}
}
}
lazy_static! {
pub static ref BLOCK_CACHE_MANAGER: Mutex<BlockCacheManager> = Mutex::new(
BlockCacheManager::new()
);
}
pub fn get_block_cache(
block_id: usize,
block_device: Arc<dyn BlockDevice>
) -> Arc<Mutex<BlockCache>> {
BLOCK_CACHE_MANAGER.lock().get_block_cache(block_id, block_device)
}
pub fn block_cache_sync_all() {
let manager = BLOCK_CACHE_MANAGER.lock();
for (_, cache) in manager.queue.iter() {
cache.lock().sync();
}
}
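
Within the crate, callers always go through get_block_cache and then take a typed view of some offset: read leaves the dirty flag alone, while modify sets it so the block is written back on sync or drop. A minimal sketch of that pattern, assuming it lived as another module inside the crate; the helper name peek_and_bump and its parameters are illustrative, not part of this commit:

use alloc::sync::Arc;
use super::{BlockDevice, get_block_cache};

// Illustrative helper: treat the first four bytes of a block as a u32 counter.
fn peek_and_bump(dev: &Arc<dyn BlockDevice>, block_id: usize) -> u32 {
    // read(): immutable typed view over the cached bytes
    let before = get_block_cache(block_id, Arc::clone(dev))
        .lock()
        .read(0, |val: &u32| *val);
    // modify(): mutable view; marks the cache dirty so it is flushed on sync()/drop
    get_block_cache(block_id, Arc::clone(dev))
        .lock()
        .modify(0, |val: &mut u32| *val += 1);
    before
}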

easy-fs/src/block_dev.rs (new file, 6 lines)

@@ -0,0 +1,6 @@
use core::any::Any;
pub trait BlockDevice: Send + Sync + Any {
fn read_block(&self, block_id: usize, buf: &mut [u8]);
fn write_block(&self, block_id: usize, buf: &[u8]);
}
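
Any backend that can read and write one 512-byte block at a time can plug in here. A minimal host-side test double, as a sketch only: it assumes std is available, and MemBlockDevice with its methods is a hypothetical name, not part of this commit.

use std::sync::Mutex;
use easy_fs::{BlockDevice, BLOCK_SZ};

/// Hypothetical in-memory device for host-side tests; not part of this commit.
pub struct MemBlockDevice {
    blocks: Mutex<Vec<[u8; BLOCK_SZ]>>,
}

impl MemBlockDevice {
    pub fn new(total_blocks: usize) -> Self {
        Self { blocks: Mutex::new(vec![[0u8; BLOCK_SZ]; total_blocks]) }
    }
}

impl BlockDevice for MemBlockDevice {
    fn read_block(&self, block_id: usize, buf: &mut [u8]) {
        buf.copy_from_slice(&self.blocks.lock().unwrap()[block_id]);
    }
    fn write_block(&self, block_id: usize, buf: &[u8]) {
        self.blocks.lock().unwrap()[block_id].copy_from_slice(buf);
    }
}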

easy-fs/src/efs.rs (new file, 163 lines)

@@ -0,0 +1,163 @@
use alloc::sync::Arc;
use spin::Mutex;
use super::{
BlockDevice,
Bitmap,
SuperBlock,
DiskInode,
DiskInodeType,
Inode,
get_block_cache,
block_cache_sync_all,
};
use crate::BLOCK_SZ;
pub struct EasyFileSystem {
pub block_device: Arc<dyn BlockDevice>,
pub inode_bitmap: Bitmap,
pub data_bitmap: Bitmap,
inode_area_start_block: u32,
data_area_start_block: u32,
}
type DataBlock = [u8; BLOCK_SZ];
impl EasyFileSystem {
pub fn create(
block_device: Arc<dyn BlockDevice>,
total_blocks: u32,
inode_bitmap_blocks: u32,
) -> Arc<Mutex<Self>> {
// calculate block size of areas & create bitmaps
let inode_bitmap = Bitmap::new(1, inode_bitmap_blocks as usize);
let inode_num = inode_bitmap.maximum();
let inode_area_blocks =
((inode_num * core::mem::size_of::<DiskInode>() + BLOCK_SZ - 1) / BLOCK_SZ) as u32;
let inode_total_blocks = inode_bitmap_blocks + inode_area_blocks;
let data_total_blocks = total_blocks - 1 - inode_total_blocks;
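// each data bitmap block indexes BLOCK_SZ * 8 = 4096 data blocks, so every 4097
// remaining blocks need one bitmap block (rounded up below)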
let data_bitmap_blocks = (data_total_blocks + 4096) / 4097;
let data_area_blocks = data_total_blocks - data_bitmap_blocks;
let data_bitmap = Bitmap::new(
(1 + inode_bitmap_blocks + inode_area_blocks) as usize,
data_bitmap_blocks as usize,
);
let mut efs = Self {
block_device: Arc::clone(&block_device),
inode_bitmap,
data_bitmap,
inode_area_start_block: 1 + inode_bitmap_blocks,
data_area_start_block: 1 + inode_total_blocks + data_bitmap_blocks,
};
// clear all blocks
for i in 0..total_blocks {
get_block_cache(
i as usize,
Arc::clone(&block_device)
)
.lock()
.modify(0, |data_block: &mut DataBlock| {
for byte in data_block.iter_mut() { *byte = 0; }
});
}
// initialize SuperBlock
get_block_cache(0, Arc::clone(&block_device))
.lock()
.modify(0, |super_block: &mut SuperBlock| {
super_block.initialize(
total_blocks,
inode_bitmap_blocks,
inode_area_blocks,
data_bitmap_blocks,
data_area_blocks,
);
});
// write back immediately
// create an inode for the root directory "/"
assert_eq!(efs.alloc_inode(), 0);
let (root_inode_block_id, root_inode_offset) = efs.get_disk_inode_pos(0);
get_block_cache(
root_inode_block_id as usize,
Arc::clone(&block_device)
)
.lock()
.modify(root_inode_offset, |disk_inode: &mut DiskInode| {
disk_inode.initialize(DiskInodeType::Directory);
});
block_cache_sync_all();
Arc::new(Mutex::new(efs))
}
pub fn open(block_device: Arc<dyn BlockDevice>) -> Arc<Mutex<Self>> {
// read SuperBlock
get_block_cache(0, Arc::clone(&block_device))
.lock()
.read(0, |super_block: &SuperBlock| {
assert!(super_block.is_valid(), "Error loading EFS!");
let inode_total_blocks =
super_block.inode_bitmap_blocks + super_block.inode_area_blocks;
let efs = Self {
block_device,
inode_bitmap: Bitmap::new(
1,
super_block.inode_bitmap_blocks as usize
),
data_bitmap: Bitmap::new(
(1 + inode_total_blocks) as usize,
super_block.data_bitmap_blocks as usize,
),
inode_area_start_block: 1 + super_block.inode_bitmap_blocks,
data_area_start_block: 1 + inode_total_blocks + super_block.data_bitmap_blocks,
};
Arc::new(Mutex::new(efs))
})
}
pub fn root_inode(efs: &Arc<Mutex<Self>>) -> Inode {
let block_device = Arc::clone(&efs.lock().block_device);
// acquire efs lock temporarily
let (block_id, block_offset) = efs.lock().get_disk_inode_pos(0);
// release efs lock
Inode::new(
block_id,
block_offset,
Arc::clone(efs),
block_device,
)
}
pub fn get_disk_inode_pos(&self, inode_id: u32) -> (u32, usize) {
let inode_size = core::mem::size_of::<DiskInode>();
let inodes_per_block = (BLOCK_SZ / inode_size) as u32;
let block_id = self.inode_area_start_block + inode_id / inodes_per_block;
(block_id, (inode_id % inodes_per_block) as usize * inode_size)
}
pub fn get_data_block_id(&self, data_block_id: u32) -> u32 {
self.data_area_start_block + data_block_id
}
pub fn alloc_inode(&mut self) -> u32 {
self.inode_bitmap.alloc(&self.block_device).unwrap() as u32
}
/// Return the absolute block ID on the device, not the index within the data area.
pub fn alloc_data(&mut self) -> u32 {
self.data_bitmap.alloc(&self.block_device).unwrap() as u32 + self.data_area_start_block
}
pub fn dealloc_data(&mut self, block_id: u32) {
get_block_cache(
block_id as usize,
Arc::clone(&self.block_device)
)
.lock()
.modify(0, |data_block: &mut DataBlock| {
data_block.iter_mut().for_each(|p| { *p = 0; })
});
self.data_bitmap.dealloc(
&self.block_device,
(block_id - self.data_area_start_block) as usize
)
}
}
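
Taken together, create() lays down the superblock, both bitmaps and the root inode, open() only validates and re-reads the superblock, and root_inode() returns a handle to "/". A hedged host-side sketch of that lifecycle, reusing the hypothetical MemBlockDevice from the block_dev.rs sketch above; the image size and function name are illustrative:

use std::sync::Arc;
use easy_fs::{BlockDevice, EasyFileSystem};

fn format_and_reopen() {
    // 8192 blocks * 512 bytes = a 4 MiB image, with one inode bitmap block
    let dev: Arc<dyn BlockDevice> = Arc::new(MemBlockDevice::new(8192));
    EasyFileSystem::create(Arc::clone(&dev), 8192, 1);
    // a later run (or another tool) reopens the same image via the superblock
    let efs = EasyFileSystem::open(Arc::clone(&dev));
    let root = EasyFileSystem::root_inode(&efs);
    assert!(root.ls().is_empty()); // freshly formatted: the root directory is empty
}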

easy-fs/src/layout.rs (new file, 448 lines)

@@ -0,0 +1,448 @@
use core::fmt::{Debug, Formatter, Result};
use super::{
BLOCK_SZ,
BlockDevice,
get_block_cache,
};
use alloc::sync::Arc;
use alloc::vec::Vec;
const EFS_MAGIC: u32 = 0x3b800001;
const INODE_DIRECT_COUNT: usize = 28;
const NAME_LENGTH_LIMIT: usize = 27;
const INODE_INDIRECT1_COUNT: usize = BLOCK_SZ / 4;
const INODE_INDIRECT2_COUNT: usize = INODE_INDIRECT1_COUNT * INODE_INDIRECT1_COUNT;
const DIRECT_BOUND: usize = INODE_DIRECT_COUNT;
const INDIRECT1_BOUND: usize = DIRECT_BOUND + INODE_INDIRECT1_COUNT;
#[allow(unused)]
const INDIRECT2_BOUND: usize = INDIRECT1_BOUND + INODE_INDIRECT2_COUNT;
#[repr(C)]
pub struct SuperBlock {
magic: u32,
pub total_blocks: u32,
pub inode_bitmap_blocks: u32,
pub inode_area_blocks: u32,
pub data_bitmap_blocks: u32,
pub data_area_blocks: u32,
}
impl Debug for SuperBlock {
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
f.debug_struct("SuperBlock")
.field("total_blocks", &self.total_blocks)
.field("inode_bitmap_blocks", &self.inode_bitmap_blocks)
.field("inode_area_blocks", &self.inode_area_blocks)
.field("data_bitmap_blocks", &self.data_bitmap_blocks)
.field("data_area_blocks", &self.data_area_blocks)
.finish()
}
}
impl SuperBlock {
pub fn initialize(
&mut self,
total_blocks: u32,
inode_bitmap_blocks: u32,
inode_area_blocks: u32,
data_bitmap_blocks: u32,
data_area_blocks: u32,
) {
*self = Self {
magic: EFS_MAGIC,
total_blocks,
inode_bitmap_blocks,
inode_area_blocks,
data_bitmap_blocks,
data_area_blocks,
}
}
pub fn is_valid(&self) -> bool {
self.magic == EFS_MAGIC
}
}
#[derive(PartialEq)]
pub enum DiskInodeType {
File,
Directory,
}
type IndirectBlock = [u32; BLOCK_SZ / 4];
type DataBlock = [u8; BLOCK_SZ];
#[repr(C)]
pub struct DiskInode {
pub size: u32,
pub direct: [u32; INODE_DIRECT_COUNT],
pub indirect1: u32,
pub indirect2: u32,
type_: DiskInodeType,
}
impl DiskInode {
/// indirect1 and indirect2 blocks are allocated only when they are needed.
pub fn initialize(&mut self, type_: DiskInodeType) {
self.size = 0;
self.direct.iter_mut().for_each(|v| *v = 0);
self.indirect1 = 0;
self.indirect2 = 0;
self.type_ = type_;
}
pub fn is_dir(&self) -> bool {
self.type_ == DiskInodeType::Directory
}
#[allow(unused)]
pub fn is_file(&self) -> bool {
self.type_ == DiskInodeType::File
}
/// Return the number of data blocks corresponding to the current size.
pub fn data_blocks(&self) -> u32 {
Self::_data_blocks(self.size)
}
fn _data_blocks(size: u32) -> u32 {
(size + BLOCK_SZ as u32 - 1) / BLOCK_SZ as u32
}
/// Return the total number of blocks needed, including indirect1/2 index blocks.
pub fn total_blocks(size: u32) -> u32 {
let data_blocks = Self::_data_blocks(size) as usize;
let mut total = data_blocks as usize;
// indirect1
if data_blocks > INODE_DIRECT_COUNT {
total += 1;
}
// indirect2
if data_blocks > INDIRECT1_BOUND {
total += 1;
// sub indirect1
total += (data_blocks - INDIRECT1_BOUND + INODE_INDIRECT1_COUNT - 1) / INODE_INDIRECT1_COUNT;
}
total as u32
}
pub fn blocks_num_needed(&self, new_size: u32) -> u32 {
assert!(new_size >= self.size);
Self::total_blocks(new_size) - Self::total_blocks(self.size)
}
pub fn get_block_id(&self, inner_id: u32, block_device: &Arc<dyn BlockDevice>) -> u32 {
let inner_id = inner_id as usize;
if inner_id < INODE_DIRECT_COUNT {
self.direct[inner_id]
} else if inner_id < INDIRECT1_BOUND {
get_block_cache(self.indirect1 as usize, Arc::clone(block_device))
.lock()
.read(0, |indirect_block: &IndirectBlock| {
indirect_block[inner_id - INODE_DIRECT_COUNT]
})
} else {
let last = inner_id - INDIRECT1_BOUND;
let indirect1 = get_block_cache(
self.indirect2 as usize,
Arc::clone(block_device)
)
.lock()
.read(0, |indirect2: &IndirectBlock| {
indirect2[last / INODE_INDIRECT1_COUNT]
});
get_block_cache(
indirect1 as usize,
Arc::clone(block_device)
)
.lock()
.read(0, |indirect1: &IndirectBlock| {
indirect1[last % INODE_INDIRECT1_COUNT]
})
}
}
pub fn increase_size(
&mut self,
new_size: u32,
new_blocks: Vec<u32>,
block_device: &Arc<dyn BlockDevice>,
) {
let mut current_blocks = self.data_blocks();
self.size = new_size;
let mut total_blocks = self.data_blocks();
let mut new_blocks = new_blocks.into_iter();
// fill direct
while current_blocks < total_blocks.min(INODE_DIRECT_COUNT as u32) {
self.direct[current_blocks as usize] = new_blocks.next().unwrap();
current_blocks += 1;
}
// alloc indirect1
if total_blocks > INODE_DIRECT_COUNT as u32 {
if current_blocks == INODE_DIRECT_COUNT as u32 {
self.indirect1 = new_blocks.next().unwrap();
}
current_blocks -= INODE_DIRECT_COUNT as u32;
total_blocks -= INODE_DIRECT_COUNT as u32;
} else {
return;
}
// fill indirect1
get_block_cache(
self.indirect1 as usize,
Arc::clone(block_device)
)
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
while current_blocks < total_blocks.min(INODE_INDIRECT1_COUNT as u32) {
indirect1[current_blocks as usize] = new_blocks.next().unwrap();
current_blocks += 1;
}
});
// alloc indirect2
if total_blocks > INODE_INDIRECT1_COUNT as u32 {
if current_blocks == INODE_INDIRECT1_COUNT as u32 {
self.indirect2 = new_blocks.next().unwrap();
}
current_blocks -= INODE_INDIRECT1_COUNT as u32;
total_blocks -= INODE_INDIRECT1_COUNT as u32;
} else {
return;
}
// fill indirect2 from (a0, b0) -> (a1, b1)
let mut a0 = current_blocks as usize / INODE_INDIRECT1_COUNT;
let mut b0 = current_blocks as usize % INODE_INDIRECT1_COUNT;
let a1 = total_blocks as usize / INODE_INDIRECT1_COUNT;
let b1 = total_blocks as usize % INODE_INDIRECT1_COUNT;
// alloc low-level indirect1
get_block_cache(
self.indirect2 as usize,
Arc::clone(block_device)
)
.lock()
.modify(0, |indirect2: &mut IndirectBlock| {
while (a0 < a1) || (a0 == a1 && b0 < b1) {
if b0 == 0 {
indirect2[a0] = new_blocks.next().unwrap();
}
// fill current
get_block_cache(
indirect2[a0] as usize,
Arc::clone(block_device)
)
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
indirect1[b0] = new_blocks.next().unwrap();
});
// move to next
b0 += 1;
if b0 == INODE_INDIRECT1_COUNT {
b0 = 0;
a0 += 1;
}
}
});
}
/// Clear size to zero and return blocks that should be deallocated.
///
/// We will clear the block contents to zero later.
pub fn clear_size(&mut self, block_device: &Arc<dyn BlockDevice>) -> Vec<u32> {
let mut v: Vec<u32> = Vec::new();
let mut data_blocks = self.data_blocks() as usize;
self.size = 0;
let mut current_blocks = 0usize;
// direct
while current_blocks < data_blocks.min(INODE_DIRECT_COUNT) {
v.push(self.direct[current_blocks]);
self.direct[current_blocks] = 0;
current_blocks += 1;
}
// indirect1 block
if data_blocks > INODE_DIRECT_COUNT {
v.push(self.indirect1);
data_blocks -= INODE_DIRECT_COUNT;
current_blocks = 0;
} else {
return v;
}
// indirect1
get_block_cache(
self.indirect1 as usize,
Arc::clone(block_device),
)
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
while current_blocks < data_blocks.min(INODE_INDIRECT1_COUNT) {
v.push(indirect1[current_blocks]);
//indirect1[current_blocks] = 0;
current_blocks += 1;
}
});
self.indirect1 = 0;
// indirect2 block
if data_blocks > INODE_INDIRECT1_COUNT {
v.push(self.indirect2);
data_blocks -= INODE_INDIRECT1_COUNT;
} else {
return v;
}
// indirect2
assert!(data_blocks <= INODE_INDIRECT2_COUNT);
let a1 = data_blocks / INODE_INDIRECT1_COUNT;
let b1 = data_blocks % INODE_INDIRECT1_COUNT;
get_block_cache(
self.indirect2 as usize,
Arc::clone(block_device),
)
.lock()
.modify(0, |indirect2: &mut IndirectBlock| {
// full indirect1 blocks
for i in 0..a1 {
v.push(indirect2[i]);
get_block_cache(
indirect2[i] as usize,
Arc::clone(block_device),
)
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
for j in 0..INODE_INDIRECT1_COUNT {
v.push(indirect1[j]);
//indirect1[j] = 0;
}
});
//indirect2[i] = 0;
}
// last indirect1 block
if b1 > 0 {
v.push(indirect2[a1]);
get_block_cache(
indirect2[a1] as usize,
Arc::clone(block_device),
)
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
for j in 0..b1 {
v.push(indirect1[j]);
//indirect1[j] = 0;
}
});
//indirect2[a1] = 0;
}
});
self.indirect2 = 0;
v
}
pub fn read_at(
&self,
offset: usize,
buf: &mut [u8],
block_device: &Arc<dyn BlockDevice>,
) -> usize {
let mut start = offset;
let end = (offset + buf.len()).min(self.size as usize);
if start >= end {
return 0;
}
let mut start_block = start / BLOCK_SZ;
let mut read_size = 0usize;
loop {
// calculate end of current block
let mut end_current_block = (start / BLOCK_SZ + 1) * BLOCK_SZ;
end_current_block = end_current_block.min(end);
// read and update read size
let block_read_size = end_current_block - start;
let dst = &mut buf[read_size..read_size + block_read_size];
get_block_cache(
self.get_block_id(start_block as u32, block_device) as usize,
Arc::clone(block_device),
)
.lock()
.read(0, |data_block: &DataBlock| {
let src = &data_block[start % BLOCK_SZ..start % BLOCK_SZ + block_read_size];
dst.copy_from_slice(src);
});
read_size += block_read_size;
// move to next block
if end_current_block == end { break; }
start_block += 1;
start = end_current_block;
}
read_size
}
/// The file size must have been increased (e.g. via `increase_size`) beforehand.
pub fn write_at(
&mut self,
offset: usize,
buf: &[u8],
block_device: &Arc<dyn BlockDevice>,
) -> usize {
let mut start = offset;
let end = (offset + buf.len()).min(self.size as usize);
assert!(start <= end);
let mut start_block = start / BLOCK_SZ;
let mut write_size = 0usize;
loop {
// calculate end of current block
let mut end_current_block = (start / BLOCK_SZ + 1) * BLOCK_SZ;
end_current_block = end_current_block.min(end);
// write and update write size
let block_write_size = end_current_block - start;
get_block_cache(
self.get_block_id(start_block as u32, block_device) as usize,
Arc::clone(block_device)
)
.lock()
.modify(0, |data_block: &mut DataBlock| {
let src = &buf[write_size..write_size + block_write_size];
let dst = &mut data_block[start % BLOCK_SZ..start % BLOCK_SZ + block_write_size];
dst.copy_from_slice(src);
});
write_size += block_write_size;
// move to next block
if end_current_block == end { break; }
start_block += 1;
start = end_current_block;
}
write_size
}
}
#[repr(C)]
pub struct DirEntry {
name: [u8; NAME_LENGTH_LIMIT + 1],
inode_number: u32,
}
pub const DIRENT_SZ: usize = 32;
impl DirEntry {
pub fn empty() -> Self {
Self {
name: [0u8; NAME_LENGTH_LIMIT + 1],
inode_number: 0,
}
}
pub fn new(name: &str, inode_number: u32) -> Self {
let mut bytes = [0u8; NAME_LENGTH_LIMIT + 1];
bytes[..name.len()].copy_from_slice(name.as_bytes());
Self {
name: bytes,
inode_number,
}
}
pub fn as_bytes(&self) -> &[u8] {
unsafe {
core::slice::from_raw_parts(
self as *const _ as usize as *const u8,
DIRENT_SZ,
)
}
}
pub fn as_bytes_mut(&mut self) -> &mut [u8] {
unsafe {
core::slice::from_raw_parts_mut(
self as *mut _ as usize as *mut u8,
DIRENT_SZ,
)
}
}
pub fn name(&self) -> &str {
let len = (0usize..).find(|i| self.name[*i] == 0).unwrap();
core::str::from_utf8(&self.name[..len]).unwrap()
}
pub fn inode_number(&self) -> u32 {
self.inode_number
}
}
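
As a sanity check on the index layout: 28 direct entries, 128 indirect1 entries (BLOCK_SZ / 4), and 128 x 128 indirect2 entries bound a file at 16540 data blocks, a little over 8 MiB. A standalone arithmetic sketch (constants restated locally, not imported from the crate):

const BLOCK_SZ: usize = 512;
const INODE_DIRECT_COUNT: usize = 28;
const INODE_INDIRECT1_COUNT: usize = BLOCK_SZ / 4;                                   // 128
const INODE_INDIRECT2_COUNT: usize = INODE_INDIRECT1_COUNT * INODE_INDIRECT1_COUNT;  // 16384

fn main() {
    let max_data_blocks = INODE_DIRECT_COUNT + INODE_INDIRECT1_COUNT + INODE_INDIRECT2_COUNT;
    let max_file_bytes = max_data_blocks * BLOCK_SZ;
    // 16540 data blocks -> 8_468_480 bytes per file
    assert_eq!(max_file_bytes, 8_468_480);
    println!("max file size: {} bytes", max_file_bytes);
}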

easy-fs/src/lib.rs (new file, 18 lines)

@@ -0,0 +1,18 @@
#![no_std]
extern crate alloc;
mod block_dev;
mod layout;
mod efs;
mod bitmap;
mod vfs;
mod block_cache;
pub const BLOCK_SZ: usize = 512;
pub use block_dev::BlockDevice;
pub use efs::EasyFileSystem;
pub use vfs::Inode;
use layout::*;
use bitmap::Bitmap;
use block_cache::{get_block_cache, block_cache_sync_all};

easy-fs/src/vfs.rs (new file, 210 lines)

@@ -0,0 +1,210 @@
use super::{
BlockDevice,
DiskInode,
DiskInodeType,
DirEntry,
EasyFileSystem,
DIRENT_SZ,
get_block_cache,
block_cache_sync_all,
};
use alloc::sync::Arc;
use alloc::string::String;
use alloc::vec::Vec;
use spin::{Mutex, MutexGuard};
pub struct Inode {
block_id: usize,
block_offset: usize,
fs: Arc<Mutex<EasyFileSystem>>,
block_device: Arc<dyn BlockDevice>,
}
impl Inode {
/// We should not acquire efs lock here.
pub fn new(
block_id: u32,
block_offset: usize,
fs: Arc<Mutex<EasyFileSystem>>,
block_device: Arc<dyn BlockDevice>,
) -> Self {
Self {
block_id: block_id as usize,
block_offset,
fs,
block_device,
}
}
fn read_disk_inode<V>(&self, f: impl FnOnce(&DiskInode) -> V) -> V {
get_block_cache(
self.block_id,
Arc::clone(&self.block_device)
).lock().read(self.block_offset, f)
}
fn modify_disk_inode<V>(&self, f: impl FnOnce(&mut DiskInode) -> V) -> V {
get_block_cache(
self.block_id,
Arc::clone(&self.block_device)
).lock().modify(self.block_offset, f)
}
fn find_inode_id(
&self,
name: &str,
disk_inode: &DiskInode,
) -> Option<u32> {
// assert it is a directory
assert!(disk_inode.is_dir());
let file_count = (disk_inode.size as usize) / DIRENT_SZ;
let mut dirent = DirEntry::empty();
for i in 0..file_count {
assert_eq!(
disk_inode.read_at(
DIRENT_SZ * i,
dirent.as_bytes_mut(),
&self.block_device,
),
DIRENT_SZ,
);
if dirent.name() == name {
return Some(dirent.inode_number() as u32);
}
}
None
}
pub fn find(&self, name: &str) -> Option<Arc<Inode>> {
let fs = self.fs.lock();
self.read_disk_inode(|disk_inode| {
self.find_inode_id(name, disk_inode)
.map(|inode_id| {
let (block_id, block_offset) = fs.get_disk_inode_pos(inode_id);
Arc::new(Self::new(
block_id,
block_offset,
self.fs.clone(),
self.block_device.clone(),
))
})
})
}
fn increase_size(
&self,
new_size: u32,
disk_inode: &mut DiskInode,
fs: &mut MutexGuard<EasyFileSystem>,
) {
if new_size < disk_inode.size {
return;
}
let blocks_needed = disk_inode.blocks_num_needed(new_size);
let mut v: Vec<u32> = Vec::new();
for _ in 0..blocks_needed {
v.push(fs.alloc_data());
}
disk_inode.increase_size(new_size, v, &self.block_device);
}
pub fn create(&self, name: &str) -> Option<Arc<Inode>> {
let mut fs = self.fs.lock();
if self.modify_disk_inode(|root_inode| {
// assert it is a directory
assert!(root_inode.is_dir());
// has the file been created?
self.find_inode_id(name, root_inode)
}).is_some() {
return None;
}
// create a new file
// allocate an inode for the new file
let new_inode_id = fs.alloc_inode();
// initialize inode
let (new_inode_block_id, new_inode_block_offset)
= fs.get_disk_inode_pos(new_inode_id);
get_block_cache(
new_inode_block_id as usize,
Arc::clone(&self.block_device)
).lock().modify(new_inode_block_offset, |new_inode: &mut DiskInode| {
new_inode.initialize(DiskInodeType::File);
});
self.modify_disk_inode(|root_inode| {
// append file in the dirent
let file_count = (root_inode.size as usize) / DIRENT_SZ;
let new_size = (file_count + 1) * DIRENT_SZ;
// increase size
self.increase_size(new_size as u32, root_inode, &mut fs);
// write dirent
let dirent = DirEntry::new(name, new_inode_id);
root_inode.write_at(
file_count * DIRENT_SZ,
dirent.as_bytes(),
&self.block_device,
);
});
let (block_id, block_offset) = fs.get_disk_inode_pos(new_inode_id);
block_cache_sync_all();
// return inode
Some(Arc::new(Self::new(
block_id,
block_offset,
self.fs.clone(),
self.block_device.clone(),
)))
// the efs lock is released automatically when `fs` goes out of scope
}
pub fn ls(&self) -> Vec<String> {
let _fs = self.fs.lock();
self.read_disk_inode(|disk_inode| {
let file_count = (disk_inode.size as usize) / DIRENT_SZ;
let mut v: Vec<String> = Vec::new();
for i in 0..file_count {
let mut dirent = DirEntry::empty();
assert_eq!(
disk_inode.read_at(
i * DIRENT_SZ,
dirent.as_bytes_mut(),
&self.block_device,
),
DIRENT_SZ,
);
v.push(String::from(dirent.name()));
}
v
})
}
pub fn read_at(&self, offset: usize, buf: &mut [u8]) -> usize {
let _fs = self.fs.lock();
self.read_disk_inode(|disk_inode| {
disk_inode.read_at(offset, buf, &self.block_device)
})
}
pub fn write_at(&self, offset: usize, buf: &[u8]) -> usize {
let mut fs = self.fs.lock();
let size = self.modify_disk_inode(|disk_inode| {
self.increase_size((offset + buf.len()) as u32, disk_inode, &mut fs);
disk_inode.write_at(offset, buf, &self.block_device)
});
block_cache_sync_all();
size
}
pub fn clear(&self) {
let mut fs = self.fs.lock();
self.modify_disk_inode(|disk_inode| {
let size = disk_inode.size;
let data_blocks_dealloc = disk_inode.clear_size(&self.block_device);
assert!(data_blocks_dealloc.len() == DiskInode::total_blocks(size) as usize);
for data_block in data_blocks_dealloc.into_iter() {
fs.dealloc_data(data_block);
}
});
block_cache_sync_all();
}
}
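
End to end, the Inode returned by root_inode() is enough to create, write, read, list and truncate files. A hedged host-side smoke-test sketch, again leaning on the hypothetical MemBlockDevice; the file name and function name are illustrative:

use std::sync::Arc;
use easy_fs::{BlockDevice, EasyFileSystem};

fn smoke_test() {
    let dev: Arc<dyn BlockDevice> = Arc::new(MemBlockDevice::new(8192));
    let efs = EasyFileSystem::create(Arc::clone(&dev), 8192, 1);
    let root = EasyFileSystem::root_inode(&efs);
    // create a file in "/" and round-trip a few bytes through it
    let file = root.create("hello.txt").unwrap();
    assert_eq!(file.write_at(0, b"easy-fs"), 7);
    let mut buf = [0u8; 7];
    assert_eq!(file.read_at(0, &mut buf), 7);
    assert_eq!(&buf, b"easy-fs");
    // the directory listing now contains exactly this entry
    assert_eq!(root.ls(), vec![String::from("hello.txt")]);
    // truncate back to zero length and return its data blocks to the bitmap
    file.clear();
}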