static int write_bootctl(struct boot_ctl *ptbootctl) { int ret = 0; char *p_block = NULL; uint offset_in_block = 0; uint blk_size = 0; if (ptbootctl == NULL) return -1; ptbootctl->crc = crc32(0, (unsigned char *)ptbootctl + CRC_DATA_OFFSET, sizeof(struct boot_ctl) - CRC_DATA_OFFSET); ret = rw_block(true, &p_block, &blk_size, NULL); if (ret) { printf("write_bootctl, rw_block read failed\n"); return -1; } offset_in_block = BOOTCTRL_OFFSET%blk_size; memcpy(p_block + offset_in_block, ptbootctl, sizeof(struct boot_ctl)); ret = rw_block(false, NULL, NULL, p_block); if (ret) { free(p_block); printf("write_bootctl, rw_block write failed\n"); return -1; } free(p_block); return 0; }
/*===========================================================================* * put_block * *===========================================================================*/ void put_block( register struct buf *bp, /* pointer to the buffer to be released */ int block_type /* INODE_BLOCK, DIRECTORY_BLOCK, or whatever */ ) { /* Return a block to the list of available blocks. Depending on 'block_type' * it may be put on the front or rear of the LRU chain. Blocks that are * expected to be needed again shortly (e.g., partially full data blocks) * go on the rear; blocks that are unlikely to be needed again shortly * (e.g., full data blocks) go on the front. Blocks whose loss can hurt * the integrity of the file system (e.g., inode blocks) are written to * disk immediately if they are dirty. */ if (bp == NULL) return; /* it is easier to check here than in caller */ bp->b_count--; /* there is one use fewer now */ if (bp->b_count != 0) return; /* block is still in use */ bufs_in_use--; /* one fewer block buffers in use */ /* Put this block back on the LRU chain. If the ONE_SHOT bit is set in * 'block_type', the block is not likely to be needed again shortly, so put * it on the front of the LRU chain where it will be the first one to be * taken when a free buffer is needed later. */ if (bp->b_dev == DEV_RAM || (block_type & ONE_SHOT)) { /* Block probably won't be needed quickly. Put it on front of chain. * It will be the next block to be evicted from the cache. */ bp->b_prev = NULL; bp->b_next = front; if (front == NULL) rear = bp; /* LRU chain was empty */ else front->b_prev = bp; front = bp; } else { /* Block probably will be needed quickly. Put it on rear of chain. * It will not be evicted from the cache for a long time. 
*/ bp->b_prev = rear; bp->b_next = NULL; if (rear == NULL) front = bp; else rear->b_next = bp; rear = bp; } /* Some blocks are so important (e.g., inodes, indirect blocks) that they * should be written to the disk immediately to avoid messing up the file * system in the event of a crash. */ if ((block_type & WRITE_IMMED) && bp->b_dirt==DIRTY && bp->b_dev != NO_DEV) { rw_block(bp, WRITING); } }
static int read_bootctl(struct boot_ctl *ptbootctl) { int ret = 0; unsigned int crc = 0; char *p_block = NULL; uint offset_in_block = 0; uint blk_size = 0; char *pmagic = NULL; if (ptbootctl == NULL) return -1; ret = rw_block(true, &p_block, &blk_size, NULL); if (ret) { printf("read_bootctl, rw_block read failed\n"); return -1; } offset_in_block = BOOTCTRL_OFFSET%blk_size; memcpy(ptbootctl, p_block + offset_in_block, sizeof(struct boot_ctl)); pmagic = ptbootctl->magic; if (!((pmagic[0] == '\0') && (pmagic[1] == 'F') && (pmagic[2] == 'S') && (pmagic[3] == 'L'))) { printf("magic error, %c %c %c %c\n", pmagic[0], pmagic[1], pmagic[2], pmagic[3]); free(p_block); return -1; } /* check crc */ crc = crc32(0, (unsigned char *)ptbootctl + CRC_DATA_OFFSET, sizeof(struct boot_ctl) - CRC_DATA_OFFSET); if (crc != ptbootctl->crc) { printf("crc check failed, caculated %d, read %d\n", crc, ptbootctl->crc); free(p_block); return -1; } free(p_block); return 0; }
/*
 * bread() - return a locked, valid buffer for (dev, block).
 *
 * Looks the block up via getblk(); if the cached contents are not valid,
 * a read is started and we sleep until the driver releases the buffer
 * lock, then claim the lock for the current task.
 */
struct buffer * bread(dev_t dev, long block)
{
	struct buffer *bh = getblk(dev, block);

	if (bh == NULL)
		panic("bread:get_buffer return NULL");

	/* Already valid: hand it straight back. */
	if (bh->b_flag & B_VALID)
		return bh;

	rw_block(READ_BUF, bh);

	/*
	 * lock buffer without to check how hold the lock.
	 * buffer would be unlock by driver.
	 */
	irq_lock();
	while (bh->b_lock.pid)
		sleep_on(&(bh->b_lock.wait));
	bh->b_lock.pid = (CURRENT_TASK())->pid;
	irq_unlock();

	return bh;
}
/*
 * getblk() - find or allocate the cache buffer for (dev, block).
 *
 * First scans the buffer table for an existing mapping; on a hit the
 * reference count is bumped and the (locked) buffer returned.  On a miss a
 * victim is unlinked from the circular free list, flushed if dirty, and
 * remapped to the requested (dev, block) with B_VALID cleared so the caller
 * knows it must be read.
 *
 * Panics when no free buffer is available.
 */
static struct buffer * getblk(dev_t dev, int block)
{
	struct buffer *buf;

	lock_buffer_table();

	/* Fast path: block already cached? */
	for (buf = buffer_table; buf < buffer_table + NR_BUFFER; buf++) {
		if ((buf->b_dev == dev) && (buf->b_block == block)) {
			buf->b_count++;
			unlock_buffer_table();
			lock_buffer(buf);
			return buf;
		}
	}

	if (!free_list_head) {
		panic("No available buffer");
	}

	/* Unlink a victim from the circular doubly-linked free list. */
	buf = free_list_head;
	if (free_list_head == free_list_head->b_free_next) {
		free_list_head = NULL;		/* that was the last one */
	} else {
		free_list_head = free_list_head->b_free_next;
		buf->b_free_prev->b_free_next = buf->b_free_next;
		buf->b_free_next->b_free_prev = buf->b_free_prev;
	}

	buf->b_count++;
	unlock_buffer_table();
	lock_buffer(buf);

	/*
	 * BUGFIX: a dirty victim still holds data for its OLD (dev, block)
	 * that never reached the disk.  It must be written back (WRITE_BUF),
	 * not read (READ_BUF): a read would clobber the dirty data, and it
	 * would target the stale location anyway since b_dev/b_block are
	 * only reassigned below.  This matches sys_sync(), which flushes
	 * dirty buffers with rw_block(WRITE_BUF, bh).
	 */
	if (buf->b_flag & B_DIRTY)
		rw_block(WRITE_BUF, buf);

	/* Remap the buffer; contents are now invalid until read. */
	buf->b_dev = dev;
	buf->b_block = block;
	buf->b_flag &= ~B_VALID;
	return buf;
}
/*
 * sys_sync() - flush all dirty buffers (and inodes) to disk.
 *
 * sync_inode() first pushes dirty inodes into the buffer cache, then every
 * dirty buffer is written out; we wait for the driver to release each
 * buffer's lock before moving on.  Always returns 0.
 */
int sys_sync()
{
	extern void sync_inode();
	int i;

	sync_inode();

	for (i = 0; i < NR_BUFFER; i++) {
		struct buffer *bh = &buffer_table[i];

		lock_buffer(bh);
		if (!(bh->b_flag & B_DIRTY)) {
			unlock_buffer(bh);
			continue;
		}
		/*
		 * buffer shold be unlock in write_block.
		 */
		rw_block(WRITE_BUF, bh);
		irq_lock();
		while (bh->b_lock.pid)
			sleep_on(&(bh->b_lock.wait));
		irq_unlock();
		unlock_buffer(bh);
	}
	return 0;
}
/*===========================================================================*
 *				get_block				     *
 *===========================================================================*/
struct buf *get_block(
  register dev_t dev,		/* on which device is the block? */
  register block_t block,	/* which block is wanted? */
  int only_search		/* if NO_READ, don't read, else act normal */
)
{
/* Check to see if the requested block is in the block cache.  If so, return
 * a pointer to it.  If not, evict some other block and fetch it (unless
 * 'only_search' is 1).  All the blocks in the cache that are not in use
 * are linked together in a chain, with 'front' pointing to the least recently
 * used block and 'rear' to the most recently used block.  If 'only_search' is
 * 1, the block being requested will be overwritten in its entirety, so it is
 * only necessary to see if it is in the cache; if it is not, any free buffer
 * will do.  It is not necessary to actually read the block in from disk.
 * If 'only_search' is PREFETCH, the block need not be read from the disk,
 * and the device is not to be marked on the block, so callers can tell if
 * the block returned is valid.
 * In addition to the LRU chain, there is also a hash chain to link together
 * blocks whose block numbers end with the same bit strings, for fast lookup.
 */

  int b;
  /* NOTE(review): bp and prev_ptr are static — this function is not
   * reentrant; presumably the FS server is single-threaded here. */
  static struct buf *bp, *prev_ptr;
  u64_t yieldid = VM_BLOCKID_NONE, getid = make64(dev, block);

  assert(buf_hash);
  assert(buf);
  assert(nr_bufs > 0);

  ASSERT(fs_block_size > 0);

  /* Search the hash chain for (dev, block).  Do_read() can use
   * get_block(NO_DEV ...) to get an unnamed block to fill with zeros when
   * someone wants to read from a hole in a file, in which case this search
   * is skipped
   */
  if (dev != NO_DEV) {
	b = BUFHASH(block);
	bp = buf_hash[b];
	while (bp != NULL) {
		if (bp->b_blocknr == block && bp->b_dev == dev) {
			/* Block needed has been found. */
			if (bp->b_count == 0) rm_lru(bp);
			bp->b_count++;	/* record that block is in use */
			ASSERT(bp->b_bytes == fs_block_size);
			ASSERT(bp->b_dev == dev);
			ASSERT(bp->b_dev != NO_DEV);
			ASSERT(bp->bp);
			return(bp);
		} else {
			/* This block is not the one sought. */
			bp = bp->b_hash;	/* move to next block on hash chain */
		}
	}
  }

  /* Desired block is not on available chain.  Take oldest block ('front'). */
  if ((bp = front) == NULL) panic("all buffers in use", nr_bufs);

  /* Victim's data area may be unallocated or undersized; allocate it now. */
  if(bp->b_bytes < fs_block_size) {
	ASSERT(!bp->bp);
	ASSERT(bp->b_bytes == 0);
	if(!(bp->bp = alloc_contig( (size_t) fs_block_size, 0, NULL))) {
		/* Allocation failed: fall back to scanning the LRU chain for
		 * any buffer that already has a full-sized data area. */
		ext2_debug("ext2: couldn't allocate a new block.\n");
		for(bp = front;
			bp && bp->b_bytes < fs_block_size;
			bp = bp->b_next)
			;
		if(!bp) {
			panic("no buffer available");
		}
	} else {
		bp->b_bytes = fs_block_size;
	}
  }

  ASSERT(bp);
  ASSERT(bp->bp);
  ASSERT(bp->b_bytes == fs_block_size);
  ASSERT(bp->b_count == 0);

  rm_lru(bp);

  /* Remove the block that was just taken from its hash chain. */
  b = BUFHASH(bp->b_blocknr);
  prev_ptr = buf_hash[b];
  if (prev_ptr == bp) {
	buf_hash[b] = bp->b_hash;
  } else {
	/* The block just taken is not on the front of its hash chain. */
	while (prev_ptr->b_hash != NULL)
		if (prev_ptr->b_hash == bp) {
			prev_ptr->b_hash = bp->b_hash;	/* found it */
			break;
		} else {
			prev_ptr = prev_ptr->b_hash;	/* keep looking */
		}
  }

  /* If the block taken is dirty, make it clean by writing it to the disk.
   * Avoid hysteresis by flushing all other dirty blocks for the same device.
   */
  if (bp->b_dev != NO_DEV) {
	if (bp->b_dirt == DIRTY) flushall(bp->b_dev);

	/* Are we throwing out a block that contained something?
	 * Give it to VM for the second-layer cache.
	 * NOTE: yieldid must be captured before b_dev is cleared below.
	 */
	yieldid = make64(bp->b_dev, bp->b_blocknr);
	assert(bp->b_bytes == fs_block_size);
	bp->b_dev = NO_DEV;
  }

  /* Fill in block's parameters and add it to the hash chain where it goes. */
  bp->b_dev = dev;		/* fill in device number */
  bp->b_blocknr = block;	/* fill in block number */
  bp->b_count++;		/* record that block is being used */
  b = BUFHASH(bp->b_blocknr);
  bp->b_hash = buf_hash[b];
  buf_hash[b] = bp;		/* add to hash list */

  if(dev == NO_DEV) {
	/* Unnamed block: hand the evicted contents (if any) to VM, done. */
	if(vmcache && cmp64(yieldid, VM_BLOCKID_NONE) != 0) {
		vm_yield_block_get_block(yieldid, VM_BLOCKID_NONE,
			bp->bp, fs_block_size);
	}
	return(bp);	/* If the caller wanted a NO_DEV block, work is done. */
  }

  /* Go get the requested block unless searching or prefetching. */
  if(only_search == PREFETCH || only_search == NORMAL) {
	/* Block is not found in our cache, but we do want it
	 * if it's in the vm cache.
	 */
	if(vmcache) {
		/* If we can satisfy the PREFETCH or NORMAL request
		 * from the vm cache, work is done.
		 */
		if(vm_yield_block_get_block(yieldid, getid,
			bp->bp, fs_block_size) == OK) {
			return bp;
		}
	}
  }

  if(only_search == PREFETCH) {
	/* PREFETCH: don't do i/o.  Leave b_dev at NO_DEV so the caller can
	 * tell the contents are not valid. */
	bp->b_dev = NO_DEV;
  } else if (only_search == NORMAL) {
	rw_block(bp, READING);
  } else if(only_search == NO_READ) {
	/* we want this block, but its contents
	 * will be overwritten. VM has to forget
	 * about it.
	 */
	if(vmcache) {
		vm_forgetblock(getid);
	}
  } else
	panic("unexpected only_search value: %d", only_search);

  assert(bp->bp);

  return(bp);	/* return the newly acquired block */
}