/*
 * Create a series of blocks, and optionally the single-, double-, and/or
 * triple-indirect blocks that refer to them.
 *
 * level   - distance from the data blocks (0 = data block, 1..3 = indirect)
 * map     - optional array receiving the logical->physical mapping of
 *           data blocks (may be NULL)
 * next    - in/out: the next logical block number to create
 * nblocks - total number of logical data blocks to create
 *
 * Returns the physical block number allocated for this (sub)tree, or 0
 * once all nblocks logical blocks have been created.
 */
static int create_blocks(ext2_filesystem *fs, int level, uint32_t *map, int *next, int nblocks)
{
    /* Every requested logical block already exists; nothing to allocate. */
    if (*next >= nblocks)
        return 0;

    /* Grab a fresh physical block for this node of the tree. */
    int physblock = alloc_block(fs);

    if (level == 0) {
        /* Data block: record the logical->physical association (when the
         * caller wants it) and advance the logical block counter. */
        if (map != NULL)
            map[*next] = physblock;
        *next += 1;
        return physblock;
    }

    /* Indirect block: fill each of its RPB slots with a reference to a
     * block one level closer to the data. Sub-calls return 0 once the
     * quota of data blocks is reached, leaving the remaining slots zero. */
    buffer *indirect;
    try(bufcache_get(fs->bc, physblock, &indirect));
    uint32_t *slots = (uint32_t *)indirect->data;
    for (int slot = 0; slot < RPB; slot++)
        slots[slot] = create_blocks(fs, level - 1, map, next, nblocks);
    bufcache_release(fs->bc, indirect, 1);

    return physblock;
}
/*
 * ext2_lookup - search the directory inode `ino` for an entry named `name`.
 *
 * On success stores the entry's inode number in *out and returns 0.
 * Returns a negative error: the value from get_inode_block() on a read
 * failure, or -ENOENT if no matching entry exists.
 *
 * Fixes over the previous version:
 *  - removed the stray bufcache_release() after the outer loop, which
 *    double-released the last buffer and, for an empty directory
 *    (numOfBlock == 0), released an uninitialized pointer;
 *  - a corrupt on-disk entry with rec_len == 0 no longer loops forever;
 *  - the block-offset cursor is unsigned, avoiding a signed/unsigned
 *    comparison against blockSize;
 *  - strlen(name) is hoisted out of the scan loop.
 */
int ext2_lookup(inode *ino, const char *name, uint32_t *out)
{
    ext2_filesystem *fs = ino->fs;
    uint32_t blockSize = fs->blocksize;
    /* Directories are a whole number of blocks in ext2. */
    uint32_t numOfBlock = ino->in.size / blockSize;
    size_t namelen = strlen(name);

    for (uint32_t blk = 0; blk < numOfBlock; blk++) {
        buffer *buff;
        int r;

        /* Map logical block `blk` of the directory and read it. */
        if (0 > (r = get_inode_block(ino, blk, 0, &buff)))
            return r;

        /* Walk the variable-length dirent records in this block. */
        for (uint32_t off = 0; off < blockSize; ) {
            ext2_dirent *de = (ext2_dirent *)&buff->data[off];

            /* Defend against corruption: rec_len == 0 would never
             * advance the cursor. Treat it as end-of-block. */
            if (de->rec_len == 0)
                break;

            /* inode == 0 marks a deleted/unused entry; names on disk
             * are not NUL-terminated, so compare length then bytes. */
            if (de->inode != 0 &&
                de->name_len == namelen &&
                !strncmp(de->name, name, de->name_len)) {
                *out = de->inode;
                bufcache_release(fs->bc, buff, buff->dirty);
                return 0;
            }

            off += de->rec_len;
        }

        /* Release the block exactly once per iteration. */
        bufcache_release(fs->bc, buff, buff->dirty);
    }

    return -ENOENT;
}
/*
 * Buffer cleaning daemon.
 *
 * Kernel thread that repeatedly pulls dirty buffers off the dirty queue
 * and starts asynchronous writes (bawrite) to push them to disk, then
 * sleeps until woken via bd_req. Runs at splbio() except around the
 * actual buffer handoff.
 */
void buf_daemon(struct proc *p)
{
	struct timeval starttime, timediff;
	struct buf *bp = NULL;
	int s, pushed = 0;	/* pushed = buffers written this batch */

	cleanerproc = curproc;

	s = splbio();
	for (;;) {
		/*
		 * Go back to sleep when there is nothing left to do:
		 * either the last scan found no dirty buffer (bp == NULL),
		 * or we pushed a full batch and the dirty-page count is
		 * below the high watermark with kva slots to spare.
		 */
		if (bp == NULL || (pushed >= 16 &&
		    UNCLEAN_PAGES < hidirtypages &&
		    bcstats.kvaslots_avail > 2 * RESERVE_SLOTS)){
			pushed = 0;
			/*
			 * Wake up anyone who was waiting for buffers
			 * to be released.
			 */
			if (needbuffer) {
				needbuffer = 0;
				wakeup(&needbuffer);
			}
			/* Sleep until the syncer signals bd_req. */
			tsleep(&bd_req, PRIBIO - 7, "cleaner", 0);
		}

		getmicrouptime(&starttime);

		while ((bp = bufcache_getdirtybuf())) {
			struct timeval tv;

			/*
			 * Stop early once enough has been pushed and the
			 * dirty-page count has fallen below the low
			 * watermark with kva slots available.
			 */
			if (UNCLEAN_PAGES < lodirtypages &&
			    bcstats.kvaslots_avail > 2 * RESERVE_SLOTS &&
			    pushed >= 16)
				break;

			/* Claim the buffer, then drop splbio for the I/O. */
			bufcache_take(bp);
			buf_acquire(bp);
			splx(s);

			/* Invalidated while queued: just release it. */
			if (ISSET(bp->b_flags, B_INVAL)) {
				brelse(bp);
				s = splbio();
				continue;
			}
#ifdef DIAGNOSTIC
			if (!ISSET(bp->b_flags, B_DELWRI))
				panic("Clean buffer on dirty queue");
#endif
			/*
			 * Buffers with outstanding soft-dependencies are
			 * marked B_DEFERRED and put back rather than
			 * written now; B_DEFERRED keeps us from retrying
			 * the same buffer forever in this pass.
			 */
			if (LIST_FIRST(&bp->b_dep) != NULL &&
			    !ISSET(bp->b_flags, B_DEFERRED) &&
			    buf_countdeps(bp, 0, 0)) {
				SET(bp->b_flags, B_DEFERRED);
				s = splbio();
				bufcache_release(bp);
				buf_release(bp);
				continue;
			}

			/* Start the asynchronous write. */
			bawrite(bp);
			pushed++;

			/* Never allow processing to run for more than 1 sec */
			getmicrouptime(&tv);
			timersub(&tv, &starttime, &timediff);
			s = splbio();
			if (timediff.tv_sec)
				break;
		}
	}
}
/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 *
 * Invalid buffers (B_NOCACHE/B_ERROR/B_INVAL) are torn down and freed
 * immediately; valid buffers are returned to the cache queues. In both
 * paths any B_WANTED sleepers on this buffer are woken, and finally the
 * syncer/cleaner and any generic buffer waiters are notified.
 * Runs at splbio() for the duration.
 */
void brelse(struct buf *bp)
{
	int s;

	s = splbio();

	/* A buffer with mapped data must report a positive size. */
	if (bp->b_data != NULL)
		KASSERT(bp->b_bufsize > 0);

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if (ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If the buffer is invalid, free it now rather than leaving
		 * it in a queue and wasting memory.
		 */
		if (LIST_FIRST(&bp->b_dep) != NULL)
			buf_deallocate(bp);

		/* Its contents will never be written; drop the dirty mark. */
		if (ISSET(bp->b_flags, B_DELWRI)) {
			CLR(bp->b_flags, B_DELWRI);
		}

		/* Detach from the vnode's buffer tree before freeing. */
		if (bp->b_vp) {
			RB_REMOVE(buf_rb_bufs, &bp->b_vp->v_bufs_tree,
			    bp);
			brelvp(bp);
		}
		bp->b_vp = NULL;

		/*
		 * Wake up any processes waiting for _this_ buffer to
		 * become free. They are not allowed to grab it
		 * since it will be freed. But the only sleeper is
		 * getblk and it will restart the operation after
		 * sleep.
		 */
		if (ISSET(bp->b_flags, B_WANTED)) {
			CLR(bp->b_flags, B_WANTED);
			wakeup(bp);
		}
		buf_put(bp);
	} else {
		/*
		 * It has valid data. Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 */
		bufcache_release(bp);

		/* Unlock the buffer. */
		CLR(bp->b_flags, (B_AGE | B_ASYNC | B_NOCACHE | B_DEFERRED));
		buf_release(bp);

		/* Wake up any processes waiting for _this_ buffer to
		 * become free. */
		if (ISSET(bp->b_flags, B_WANTED)) {
			CLR(bp->b_flags, B_WANTED);
			wakeup(bp);
		}
	}

	/* Wake up syncer and cleaner processes waiting for buffers. */
	if (nobuffers) {
		nobuffers = 0;
		wakeup(&nobuffers);
	}

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer && bcstats.numbufpages < targetpages &&
	    bcstats.kvaslots_avail > RESERVE_SLOTS) {
		needbuffer = 0;
		wakeup(&needbuffer);
	}

	splx(s);
}