Example #1
/* return negative value on error */
int
bitmap_create(u_int32_t nbits, struct bitmap **bp)
{
	struct bitmap *b;
	u_int32_t words;

	words = DIVROUNDUP(nbits, BITS_PER_WORD);
	b = malloc(sizeof(struct bitmap));
	if (b == NULL) {
		return -ENOMEM;
	}
	b->v = malloc(words * sizeof(WORD_TYPE));
	if (b->v == NULL) {
		free(b);
		return -ENOMEM;
	}

	bzero(b->v, words * sizeof(WORD_TYPE));
	b->nbits = nbits;

	/* Mark any leftover bits at the end in use */
	if (nbits / BITS_PER_WORD < words) {
		u_int32_t j, ix = words - 1;
		u_int32_t overbits = nbits - ix * BITS_PER_WORD;

		assert(nbits / BITS_PER_WORD == words - 1);
		assert(overbits > 0 && overbits < BITS_PER_WORD);

		for (j = overbits; j < BITS_PER_WORD; j++) {
			b->v[ix] |= ((WORD_TYPE) 1 << j);
		}
	}
	*bp = b;
	return 0;
}
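Every example below leans on DIVROUNDUP for ceiling division. The macro itself appears in none of the excerpts; a minimal sketch of the usual definition, together with plausible companion definitions for the bitmap code (the names match the excerpts, the exact values are assumptions), looks like this:

/* Ceiling division: DIVROUNDUP(70, 32) == 3, DIVROUNDUP(64, 32) == 2.
 * Note that (a) + (b) - 1 can overflow when a is near the type's max. */
#define DIVROUNDUP(a, b) (((a) + (b) - 1) / (b))

/* Assumed companion definitions for the bitmap examples, not taken
 * from the excerpts. */
#define WORD_TYPE      u_int32_t
#define BITS_PER_WORD  (sizeof(WORD_TYPE) * 8)
#define WORD_ALLBITS   (0xffffffffU)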
Example #2
struct bitmap *
bitmap_create(unsigned nbits)
{
        struct bitmap *b; 
        unsigned words;

        words = DIVROUNDUP(nbits, BITS_PER_WORD);
        b = (struct bitmap *)malloc(sizeof(struct bitmap));
        if (b == NULL) {
                return NULL;
        }
        b->v = malloc(words*sizeof(unsigned));
        if (b->v == NULL) {
                free(b);
                return NULL;
        }

        memset(b->v, 0, words*sizeof(unsigned));
        b->nbits = nbits;

        /* Mark any leftover bits at the end in use */
        if (words > nbits / BITS_PER_WORD) {
                unsigned j, ix = words-1;
                unsigned overbits = nbits - ix*BITS_PER_WORD;

                assert(nbits / BITS_PER_WORD == words-1);
                assert(overbits > 0 && overbits < BITS_PER_WORD);
                
                for (j=overbits; j<BITS_PER_WORD; j++) {
                        b->v[ix] |= ((unsigned)1 << j);
                }
        }

        return b;
}
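To see the leftover-bit marking concretely: with nbits = 70 and BITS_PER_WORD = 32, words = DIVROUNDUP(70, 32) = 3 while 70 / 32 = 2, so the final word is only partially used. Then ix = 2, overbits = 70 - 2 * 32 = 6, and the loop sets bits 6 through 31 of b->v[2]; bitmap_alloc can therefore never hand out a bit index at or beyond nbits.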
Example #3
int
testfs_read_data(struct inode *in, char *buf, off_t start, size_t size)
{
	char block[BLOCK_SIZE];
	long block_nr = start / BLOCK_SIZE;
	long block_ix = start % BLOCK_SIZE;
	int ret;

	assert(buf);
	if (start + (off_t) size > in->in.i_size) {
		size = in->in.i_size - start;
	}
	if (block_ix + size > BLOCK_SIZE) {
		int req_b = DIVROUNDUP(block_ix + size, BLOCK_SIZE);
		int i;
		size_t read_block_size, remaining_bytes;
		remaining_bytes = size;

		read_block_size = BLOCK_SIZE - block_ix;
		remaining_bytes -= read_block_size;
		if ((ret = testfs_read_block(in, block_nr, block)) < 0)
			return ret;
		memcpy(buf, block+block_ix, read_block_size);
		block_nr++;

		for (i = 1; i < req_b; i++) {
			memset(&block[0], 0, sizeof(block));

			if ((ret = testfs_read_block(in, block_nr, block)) < 0)
				return ret;

			if (remaining_bytes == 0)
				break;
			if (remaining_bytes >= BLOCK_SIZE) {
				read_block_size = BLOCK_SIZE;
				remaining_bytes -= BLOCK_SIZE;
			} else {
				read_block_size = remaining_bytes;
				remaining_bytes = 0;
			}
			memcpy(buf + size - (remaining_bytes + read_block_size),
			       block, read_block_size);
			block_nr++;
		}

		return size;

	}
	if ((ret = testfs_read_block(in, block_nr, block)) < 0)
		return ret;
	memcpy(buf, block + block_ix, size);
	/* return the number of bytes read or any error */
	return size;
}
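A concrete trace of the multi-block path, assuming BLOCK_SIZE = 4096: for start = 4000 and size = 9000 (with i_size large enough), block_nr = 0, block_ix = 4000, and req_b = DIVROUNDUP(4000 + 9000, 4096) = 4. The first read copies 4096 - 4000 = 96 bytes, the next two iterations copy 4096 bytes each, and the final one copies the remaining 712, for a total of 96 + 4096 + 4096 + 712 = 9000 bytes.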
Example #4
int testfs_free_blocks(struct inode *in) {
    int i;
    int e_block_nr;

    /* last block number */
    e_block_nr = DIVROUNDUP(in->in.i_size, BLOCK_SIZE);

    /* remove direct blocks */
    for (i = 0; i < e_block_nr && i < NR_DIRECT_BLOCKS; i++) {
        if (in->in.i_block_nr[i] == 0)
            continue;
        testfs_free_block_from_inode(in, in->in.i_block_nr[i]);
        in->in.i_block_nr[i] = 0;
    }
    e_block_nr -= NR_DIRECT_BLOCKS;

    /* remove indirect blocks */
    if (in->in.i_indirect > 0) {
        char block[BLOCK_SIZE];
        read_blocks(in->sb, block, in->in.i_indirect, 1);
        for (i = 0; i < e_block_nr && i < NR_INDIRECT_BLOCKS; i++) {
            testfs_free_block_from_inode(in, ((int *) block)[i]);
            ((int *) block)[i] = 0;
        }
        testfs_free_block_from_inode(in, in->in.i_indirect);
        in->in.i_indirect = 0;
    }

    e_block_nr -= NR_INDIRECT_BLOCKS;
    if (e_block_nr >= 0 && in->in.i_dindirect > 0) {
        int j = 0;
        int deleted = 0;
        char block[BLOCK_SIZE];
        read_blocks(in->sb, block, in->in.i_dindirect, 1);
        for(i = 0; deleted < e_block_nr && i < NR_INDIRECT_BLOCKS; i++){
            if(((int *)block)[i] > 0){
                char single_block[BLOCK_SIZE];
                read_blocks(in->sb, single_block, ((int *)block)[i], 1);
                for(j = 0; deleted < e_block_nr && j < NR_INDIRECT_BLOCKS; j++){
                    testfs_free_block_from_inode(in, ((int *) single_block)[j]);
                    ((int *) single_block)[j] = 0;
                    deleted++;
                }
                testfs_free_block_from_inode(in, ((int *) block)[i]);
                ((int *) block)[i] = 0;
            } else
                deleted += NR_INDIRECT_BLOCKS;
        }
        testfs_free_block_from_inode(in, in->in.i_dindirect);
        in->in.i_dindirect = 0;
    }

    in->in.i_size = 0;
    in->i_flags |= I_FLAGS_DIRTY;
    return 0;
}
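The accounting above also bounds file size: with NR_DIRECT_BLOCKS = 10 and NR_INDIRECT_BLOCKS = 2048 entries per indirect block (the values the later testfs examples assume), a file spans at most 10 + 2048 + 2048 * 2048 = 4196362 blocks, which matches the log_block_nr > 4196361 guard in testfs_allocate_block further down.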
Example #5
static
void
dump_subpage(struct pageref *pr, unsigned generation)
{
	unsigned blocksize = sizes[PR_BLOCKTYPE(pr)];
	unsigned numblocks = PAGE_SIZE / blocksize;
	unsigned numfreewords = DIVROUNDUP(numblocks, 32);
	uint32_t isfree[numfreewords], mask;
	vaddr_t prpage;
	struct freelist *fl;
	vaddr_t blockaddr;
	struct malloclabel *ml;
	unsigned i;

	for (i=0; i<numfreewords; i++) {
		isfree[i] = 0;
	}

	prpage = PR_PAGEADDR(pr);
	fl = (struct freelist *)(prpage + pr->freelist_offset);
	for (; fl != NULL; fl = fl->next) {
		i = ((vaddr_t)fl - prpage) / blocksize;
		mask = 1U << (i % 32);
		isfree[i / 32] |= mask;
	}

	for (i=0; i<numblocks; i++) {
		mask = 1U << (i % 32);
		if (isfree[i / 32] & mask) {
			continue;
		}
		blockaddr = prpage + i * blocksize;
		ml = (struct malloclabel *)blockaddr;
		if (ml->generation != generation) {
			continue;
		}
	kprintf("%5u bytes at %p, allocated at %p\n",
		blocksize, (void *)blockaddr, (void *)ml->label);
	}
}
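Here DIVROUNDUP sizes a bit vector: one bit per block, packed 32 to a uint32_t, so DIVROUNDUP(numblocks, 32) words always suffice. For instance, with PAGE_SIZE = 4096 and blocksize = 64, numblocks = 64 and numfreewords = 2.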
Example #6
/* return negative value on error */
int
bitmap_alloc(struct bitmap *b, u_int32_t * index)
{
	u_int32_t ix;
	u_int32_t maxix = DIVROUNDUP(b->nbits, BITS_PER_WORD);
	u_int32_t offset;

	for (ix = 0; ix < maxix; ix++) {
		if (b->v[ix] != WORD_ALLBITS) {
			for (offset = 0; offset < BITS_PER_WORD; offset++) {
				WORD_TYPE mask = ((WORD_TYPE) 1) << offset;
				if ((b->v[ix] & mask) == 0) {
					b->v[ix] |= mask;
					*index = (ix * BITS_PER_WORD) + offset;
					assert(*index < b->nbits);
					return 0;
				}
			}
			assert(0);
		}
	}
	return -ENOSPC;
}
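A quick trace: with BITS_PER_WORD = 32, b->v[0] = WORD_ALLBITS and b->v[1] = 0x0000ffff, the outer loop skips word 0, the inner loop finds the first clear bit of word 1 at offset 16, and the function returns 0 with *index = 1 * 32 + 16 = 48.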
Example #7
int
bitmap_alloc(struct bitmap *b, unsigned *index)
{
        unsigned ix;
        unsigned maxix = DIVROUNDUP(b->nbits, BITS_PER_WORD);
        unsigned offset;

        for (ix=0; ix<maxix; ix++) {
                if (b->v[ix]!=WORD_ALLBITS) {
                        for (offset = 0; offset < BITS_PER_WORD; offset++) {
                                unsigned mask = ((unsigned)1) << offset;

                                if ((b->v[ix] & mask)==0) {
                                        b->v[ix] |= mask;
                                        *index = (ix*BITS_PER_WORD)+offset;
                                        assert(*index < b->nbits);
                                        return 0;
                                }
                        }
                        assert(0);
                }
        }
        return 1;
}
Example #8
/*
 * Thread migration.
 *
 * This is also called periodically from hardclock(). If the current
 * CPU is busy and other CPUs are idle, or less busy, it should move
 * threads across to those other CPUs.
 *
 * Migrating threads isn't free because of cache affinity; a thread's
 * working cache set will end up having to be moved to the other CPU,
 * which is fairly slow. The tradeoff between this performance loss
 * and the performance loss due to underutilization of some CPUs is
 * something that needs to be tuned and probably is workload-specific.
 *
 * For here and now, because we know we're running on System/161 and
 * System/161 does not (yet) model such cache effects, we'll be very
 * aggressive.
 */
void
thread_consider_migration(void)
{
	unsigned my_count, total_count, one_share, to_send;
	unsigned i, numcpus;
	struct cpu *c;
	struct threadlist victims;
	struct thread *t;

	my_count = total_count = 0;
	numcpus = cpuarray_num(&allcpus);
	for (i=0; i<numcpus; i++) {
		c = cpuarray_get(&allcpus, i);
		spinlock_acquire(&c->c_runqueue_lock);
		total_count += c->c_runqueue.tl_count;
		if (c == curcpu->c_self) {
			my_count = c->c_runqueue.tl_count;
		}
		spinlock_release(&c->c_runqueue_lock);
	}

	one_share = DIVROUNDUP(total_count, numcpus);
	if (my_count < one_share) {
		return;
	}

	to_send = my_count - one_share;
	threadlist_init(&victims);
	spinlock_acquire(&curcpu->c_runqueue_lock);
	for (i=0; i<to_send; i++) {
		t = threadlist_remtail(&curcpu->c_runqueue);
		threadlist_addhead(&victims, t);
	}
	spinlock_release(&curcpu->c_runqueue_lock);

	for (i=0; i < numcpus && to_send > 0; i++) {
		c = cpuarray_get(&allcpus, i);
		if (c == curcpu->c_self) {
			continue;
		}
		spinlock_acquire(&c->c_runqueue_lock);
		while (c->c_runqueue.tl_count < one_share && to_send > 0) {
			t = threadlist_remhead(&victims);
			/*
			 * Ordinarily, curthread will not appear on
			 * the run queue. However, it can under the
			 * following circumstances:
			 *   - it went to sleep;
			 *   - the processor became idle, so it
			 *     remained curthread;
			 *   - it was reawakened, so it was put on the
			 *     run queue;
			 *   - and the processor hasn't fully unidled
			 *     yet, so all these things are still true.
			 *
			 * If the timer interrupt happens at (almost)
			 * exactly the proper moment, we can come here
			 * while things are in this state and see
			 * curthread. However, *migrating* curthread
			 * can cause bad things to happen (Exercise:
			 * Why? And what?) so shuffle it to the end of
			 * the list and decrement to_send in order to
			 * skip it. Then it goes back on our own run
			 * queue below.
			 */
			if (t == curthread) {
				threadlist_addtail(&victims, t);
				to_send--;
				continue;
			}

			t->t_cpu = c;
			threadlist_addtail(&c->c_runqueue, t);
			DEBUG(DB_THREADS,
			      "Migrated thread %s: cpu %u -> %u",
			      t->t_name, curcpu->c_number, c->c_number);
			to_send--;
			if (c->c_isidle) {
				/*
				 * Other processor is idle; send
				 * interrupt to make sure it unidles.
				 */
				ipi_send(c, IPI_UNIDLE);
			}
		}
		spinlock_release(&c->c_runqueue_lock);
	}

	/*
	 * Because the code above isn't atomic, the thread counts may have
	 * changed while we were working and we may end up with leftovers.
	 * Don't panic; just put them back on our own run queue.
	 */
	if (!threadlist_isempty(&victims)) {
		spinlock_acquire(&curcpu->c_runqueue_lock);
		while ((t = threadlist_remhead(&victims)) != NULL) {
			threadlist_addtail(&curcpu->c_runqueue, t);
		}
		spinlock_release(&curcpu->c_runqueue_lock);
	}

	KASSERT(threadlist_isempty(&victims));
	threadlist_cleanup(&victims);
}
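The fair share is rounded up, so migration triggers only when this CPU is strictly above it: with total_count = 10 runnable threads across numcpus = 4, one_share = DIVROUNDUP(10, 4) = 3; a CPU holding 5 queued threads offers to_send = 5 - 3 = 2 victims, while one holding 3 returns immediately.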
Example #9
/*
 * Called for ftruncate() and from sfs_reclaim.
 */
static
int
sfs_truncate(struct vnode *v, off_t len)
{
	/*
	 * I/O buffer for handling the indirect block.
	 *
	 * Note: in real life (and when you've done the fs assignment)
	 * you would get space from the disk buffer cache for this,
	 * not use a static area.
	 */
	static u_int32_t idbuf[SFS_DBPERIDB];

	struct sfs_vnode *sv = v->vn_data;
	struct sfs_fs *sfs = sv->sv_v.vn_fs->fs_data;

	/* Length in blocks (divide rounding up) */
	u_int32_t blocklen = DIVROUNDUP(len, SFS_BLOCKSIZE);

	u_int32_t i, j, block;
	u_int32_t idblock, baseblock, highblock;
	int result;
	int hasnonzero, iddirty;

	assert(sizeof(idbuf)==SFS_BLOCKSIZE);

	/*
	 * Go through the direct blocks. Discard any that are
	 * past the limit we're truncating to.
	 */
	for (i=0; i<SFS_NDIRECT; i++) {
		block = sv->sv_i.sfi_direct[i];
		if (i >= blocklen && block != 0) {
			sfs_bfree(sfs, block);
			sv->sv_i.sfi_direct[i] = 0;
			sv->sv_dirty = 1;
		}
	}

	/* Indirect block number */
	idblock = sv->sv_i.sfi_indirect;

	/* The lowest block in the indirect block */
	baseblock = SFS_NDIRECT;

	/* The highest block in the indirect block */
	highblock = baseblock + SFS_DBPERIDB - 1;

	if (blocklen < highblock && idblock != 0) {
		/* We're past the proposed EOF; may need to free stuff */

		/* Read the indirect block */
		result = sfs_rblock(sfs, idbuf, idblock);
		if (result) {
			return result;
		}
		
		hasnonzero = 0;
		iddirty = 0;
		for (j=0; j<SFS_DBPERIDB; j++) {
			/* Discard any blocks that are past the new EOF */
			if (blocklen < baseblock+j && idbuf[j] != 0) {
				sfs_bfree(sfs, idbuf[j]);
				idbuf[j] = 0;
				iddirty = 1;
			}
			/* Remember if we see any nonzero blocks in here */
			if (idbuf[j]!=0) {
				hasnonzero=1;
			}
		}

		if (!hasnonzero) {
			/* The whole indirect block is empty now; free it */
			sfs_bfree(sfs, idblock);
			sv->sv_i.sfi_indirect = 0;
			sv->sv_dirty = 1;
		}
		else if (iddirty) {
			/* The indirect block is dirty; write it back */
			result = sfs_wblock(sfs, idbuf, idblock);
			if (result) {
				return result;
			}
		}
	}

	/* Set the file size */
	sv->sv_i.sfi_size = len;

	/* Mark the inode dirty */
	sv->sv_dirty = 1;
	
	return 0;
}
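A worked case, assuming the usual SFS parameters SFS_BLOCKSIZE = 512 and SFS_NDIRECT = 15: truncating to len = 5000 gives blocklen = DIVROUNDUP(5000, 512) = 10, so direct blocks 10 through 14 are freed; and since blocklen < baseblock = 15, every entry of the indirect block lies past the new EOF, hasnonzero ends up 0, and the indirect block itself is freed too.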
Example #10
/*
 * Larger kmalloc test. Or at least, potentially larger. The size is
 * an argument.
 *
 * The argument specifies the number of objects to allocate; the size
 * of each allocation rotates through sizes[]. (FUTURE: should there
 * be a mode that allocates random sizes?) In order to hold the
 * pointers returned by kmalloc we first allocate a two-level radix
 * tree whose lower tier is made up of blocks of size PAGE_SIZE/4.
 * (This is so they all go to the subpage allocator rather than being
 * whole-page allocations.)
 *
 * Since PAGE_SIZE is commonly 4096, each of these blocks holds 256
 * pointers (on a 32-bit machine) or 128 (on a 64-bit machine) and so
 * we can store considerably more pointers than we have memory for
 * before the upper tier becomes a whole page or otherwise gets
 * uncomfortably large.
 *
 * Having set this up, the test just allocates and then frees all the
 * pointers in order, setting and checking the contents.
 */
int
kmalloctest3(int nargs, char **args)
{
#define NUM_KM3_SIZES 5
	static const unsigned sizes[NUM_KM3_SIZES] = { 32, 41, 109, 86, 9 };
	unsigned numptrs;
	size_t ptrspace;
	size_t blocksize;
	unsigned numptrblocks;
	void ***ptrblocks;
	unsigned curblock, curpos, cursizeindex, cursize;
	size_t totalsize;
	unsigned i, j;
	unsigned char *ptr;

	if (nargs != 2) {
		kprintf("kmalloctest3: usage: km3 numobjects\n");
		return EINVAL;
	}

	/* Figure out how many pointers we'll get and the space they need. */
	numptrs = atoi(args[1]);
	ptrspace = numptrs * sizeof(void *);

	/* Figure out how many blocks in the lower tier. */
	blocksize = PAGE_SIZE / 4;
	numptrblocks = DIVROUNDUP(ptrspace, blocksize);

	kprintf("kmalloctest3: %u objects, %u pointer blocks\n",
		numptrs, numptrblocks);

	/* Allocate the upper tier. */
	ptrblocks = kmalloc(numptrblocks * sizeof(ptrblocks[0]));
	if (ptrblocks == NULL) {
		panic("kmalloctest3: failed on pointer block array\n");
	}
	/* Allocate the lower tier. */
	for (i=0; i<numptrblocks; i++) {
		ptrblocks[i] = kmalloc(blocksize);
		if (ptrblocks[i] == NULL) {
			panic("kmalloctest3: failed on pointer block %u\n", i);
		}
	}

	/* Allocate the objects. */
	curblock = 0;
	curpos = 0;
	cursizeindex = 0;
	totalsize = 0;
	for (i=0; i<numptrs; i++) {
		cursize = sizes[cursizeindex];
		ptr = kmalloc(cursize);
		if (ptr == NULL) {
			kprintf("kmalloctest3: failed on object %u size %u\n",
				i, cursize);
			kprintf("kmalloctest3: pos %u in pointer block %u\n",
				curpos, curblock);
			kprintf("kmalloctest3: total so far %zu\n", totalsize);
			panic("kmalloctest3: failed.\n");
		}
		/* Fill the object with its number. */
		for (j=0; j<cursize; j++) {
			ptr[j] = (unsigned char) i;
		}
		/* Move to the next slot in the tree. */
		ptrblocks[curblock][curpos] = ptr;
		curpos++;
		if (curpos >= blocksize / sizeof(void *)) {
			curblock++;
			curpos = 0;
		}
		/* Update the running total, and rotate the size. */
		totalsize += cursize;
		cursizeindex = (cursizeindex + 1) % NUM_KM3_SIZES;
	}

	kprintf("kmalloctest3: %zu bytes allocated\n", totalsize);

	/* Free the objects. */
	curblock = 0;
	curpos = 0;
	cursizeindex = 0;
	for (i=0; i<numptrs; i++) {
		PROGRESS(i);
		cursize = sizes[cursizeindex];
		ptr = ptrblocks[curblock][curpos];
		KASSERT(ptr != NULL);
		for (j=0; j<cursize; j++) {
			if (ptr[j] == (unsigned char) i) {
				continue;
			}
			kprintf("kmalloctest3: failed on object %u size %u\n",
				i, cursize);
			kprintf("kmalloctest3: pos %u in pointer block %u\n",
				curpos, curblock);
			kprintf("kmalloctest3: at object offset %u\n", j);
			kprintf("kmalloctest3: expected 0x%x, found 0x%x\n",
				(unsigned char) i, ptr[j]);
			panic("kmalloctest3: failed.\n");
		}
		kfree(ptr);
		curpos++;
		if (curpos >= blocksize / sizeof(void *)) {
			curblock++;
			curpos = 0;
		}
		KASSERT(totalsize > 0);
		totalsize -= cursize;
		cursizeindex = (cursizeindex + 1) % NUM_KM3_SIZES;
	}
	KASSERT(totalsize == 0);

	/* Free the lower tier. */
	for (i=0; i<numptrblocks; i++) {
		PROGRESS(i);
		KASSERT(ptrblocks[i] != NULL);
		kfree(ptrblocks[i]);
	}
	/* Free the upper tier. */
	kfree(ptrblocks);

	kprintf("\n");
	success(TEST161_SUCCESS, SECRET, "km3");
	return 0;
}
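To make the tier sizing concrete: on a 32-bit machine with PAGE_SIZE = 4096, each lower-tier block of blocksize = 1024 bytes holds 256 pointers, so running km3 5000 gives ptrspace = 20000 bytes and numptrblocks = DIVROUNDUP(20000, 1024) = 20.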
Example #11
int
testfs_write_data(struct inode *in, const char *buf, off_t start, size_t size)
{

	char block[BLOCK_SIZE];
	long block_nr = start / BLOCK_SIZE;
	long block_ix = start % BLOCK_SIZE;
	int ret;
	/* file offset just past the last byte written so far */
	long long part = start;

	if (block_ix + size > BLOCK_SIZE) {
		/* the write spans req_b blocks */
		int req_b = DIVROUNDUP(block_ix + size, BLOCK_SIZE);
		int i;
		size_t write_block_size, remaining_bytes;

		remaining_bytes = size;
		write_block_size = BLOCK_SIZE - block_ix;
		remaining_bytes -= write_block_size;
		ret = testfs_allocate_block(in, block_nr, block);
		if (ret < 0)
			return ret;
		memcpy(block + block_ix, buf, write_block_size);
		write_blocks(in->sb, block, ret, 1);
		part += write_block_size;
		block_nr++;

		for (i = 1; i < req_b; i++) {
			memset(&block[0], 0, sizeof(block));
			ret = testfs_allocate_block(in, block_nr, block);
			if (ret < 0)
				break;
			if (remaining_bytes == 0)
				break;
			if (remaining_bytes >= BLOCK_SIZE) {
				write_block_size = BLOCK_SIZE;
				remaining_bytes -= BLOCK_SIZE;
			} else {
				write_block_size = remaining_bytes;
				remaining_bytes = 0;
			}
			part += write_block_size;
			memcpy(block, buf + size - (remaining_bytes + write_block_size),
			       write_block_size);
			write_blocks(in->sb, block, ret, 1);
			block_nr++;
		}

		/* extend i_size up to the bytes actually written */
		if (size > 0)
			in->in.i_size = MAX(in->in.i_size, part);
		in->i_flags |= I_FLAGS_DIRTY;
		if (ret < 0)
			return ret;
		return size;
	}
	/* ret is the newly allocated physical block number */
	ret = testfs_allocate_block(in, block_nr, block);
	if (ret < 0)
		return ret;
	memcpy(block + block_ix, buf, size);
	write_blocks(in->sb, block, ret, 1);
	/* increment i_size by the number of bytes written. */
	if (size > 0)
		in->in.i_size = MAX(in->in.i_size, start + (off_t) size);
	in->i_flags |= I_FLAGS_DIRTY;
	/* return the number of bytes written or any error */
	return size;
}
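The write path mirrors the read trace given earlier: for start = 4000, size = 9000, BLOCK_SIZE = 4096, the code writes 96 + 4096 + 4096 + 712 bytes, part ends at 13000, and a write past the old end of file extends i_size to 13000.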
Example #12
/* given logical block number, allocate a new physical block, if it does not
 * exist already, and return the physical block number that is allocated.
 * returns negative value on error. */
static int
testfs_allocate_block(struct inode *in, int log_block_nr, char *block)
{
	/* largest supported file: 10 direct + 2048 indirect +
	 * 2048 * 2048 double-indirect blocks = 4196362 blocks,
	 * so the last valid logical block number is 4196361 */
	if (log_block_nr > 4196361)
		return -EFBIG;

	int phy_block_nr;
	char indirect[BLOCK_SIZE];
	char dindirect[BLOCK_SIZE];
	char dindirect2[BLOCK_SIZE];
	int indirect_allocated = 0;

	assert(log_block_nr >= 0);
	phy_block_nr = testfs_read_block(in, log_block_nr, block);

	/* phy_block_nr > 0: block exists, so we don't need to allocate it;
	   phy_block_nr < 0: some error */
	if (phy_block_nr != 0)
		return phy_block_nr;

	/* allocate a direct block */
	if (log_block_nr < NR_DIRECT_BLOCKS) {
		assert(in->in.i_block_nr[log_block_nr] == 0);
		phy_block_nr = testfs_alloc_block_for_inode(in);
		if (phy_block_nr >= 0) {
			in->in.i_block_nr[log_block_nr] = phy_block_nr;
		}
		return phy_block_nr;
	}

	log_block_nr -= NR_DIRECT_BLOCKS;

	if (log_block_nr >= NR_INDIRECT_BLOCKS) {
		/* double-indirect range: log_block_nr now ranges
		 * from 0 to 4194303 */
		log_block_nr -= NR_INDIRECT_BLOCKS;

		/* index into the double-indirect block, 0 to 2047;
		 * DIVROUNDUP(n + 1, 2048) - 1 is simply n / 2048 */
		int level1_block = DIVROUNDUP(log_block_nr + 1, 2048) - 1;
		/* index into the second-level indirect block, 0 to 2047 */
		int level2_block = log_block_nr % 2048;

		if (in->in.i_dindirect == 0) {
			/* allocate a double-indirect block */
			bzero(dindirect, BLOCK_SIZE);
			phy_block_nr = testfs_alloc_block_for_inode(in);
			if (phy_block_nr < 0)
				return phy_block_nr;
			in->in.i_dindirect = phy_block_nr;
		} else {
			/* read the double-indirect block */
			read_blocks(in->sb, dindirect, in->in.i_dindirect, 1);
		}

		if (((int *)dindirect)[level1_block] == 0) {
			/* allocate a second-level indirect block */
			bzero(dindirect2, BLOCK_SIZE);
			phy_block_nr = testfs_alloc_block_for_inode(in);
			if (phy_block_nr < 0)
				return phy_block_nr;
			((int *)dindirect)[level1_block] = phy_block_nr;
			write_blocks(in->sb, dindirect, in->in.i_dindirect, 1);
		} else {
			/* read the second-level indirect block */
			read_blocks(in->sb, dindirect2,
				    ((int *)dindirect)[level1_block], 1);
		}

		/* allocate the data block */
		phy_block_nr = testfs_alloc_block_for_inode(in);
		if (phy_block_nr >= 0) {
			/* update the second-level indirect block */
			((int *)dindirect2)[level2_block] = phy_block_nr;
			write_blocks(in->sb, dindirect2,
				     ((int *)dindirect)[level1_block], 1);
		}
		return phy_block_nr;
	}

	/* allocating in the single-indirect range */
	if (in->in.i_indirect == 0) {
		/* allocate an indirect block */
		bzero(indirect, BLOCK_SIZE);
		phy_block_nr = testfs_alloc_block_for_inode(in);
		if (phy_block_nr < 0)
			return phy_block_nr;
		indirect_allocated = 1;
		in->in.i_indirect = phy_block_nr;
	} else {
		/* read the indirect block */
		read_blocks(in->sb, indirect, in->in.i_indirect, 1);
	}

	/* allocate the data block */
	assert(((int *)indirect)[log_block_nr] == 0);
	phy_block_nr = testfs_alloc_block_for_inode(in);

	if (phy_block_nr >= 0) {
		/* update the indirect block */
		((int *)indirect)[log_block_nr] = phy_block_nr;
		write_blocks(in->sb, indirect, in->in.i_indirect, 1);
	} else if (indirect_allocated) {
		/* free the indirect block that was allocated, and
		 * clear the now-dangling pointer to it */
		testfs_free_block_from_inode(in, in->in.i_indirect);
		in->in.i_indirect = 0;
	}

	return phy_block_nr;
}
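A note on the index arithmetic used twice in these examples: for any n >= 0, DIVROUNDUP(n + 1, 2048) - 1 = (n + 2048) / 2048 - 1 = n / 2048, so level1_block and level2_block are just the quotient and remainder of log_block_nr by 2048; for log_block_nr = 5000 that gives level1_block = 2 and level2_block = 904 (2 * 2048 + 904 = 5000).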
Example #13
/* given logical block number, read the corresponding physical block into block.
 * return physical block number.
 * returns 0 if physical block does not exist.
 * returns negative value on other errors. */
static int
testfs_read_block(struct inode *in, int log_block_nr, char *block)
{
	int phy_block_nr = 0;

	assert(log_block_nr >= 0);

	if (log_block_nr < NR_DIRECT_BLOCKS) {
		/* direct block: logical blocks 0 to 9 */
		phy_block_nr = (int)in->in.i_block_nr[log_block_nr];
	} else {
		log_block_nr -= NR_DIRECT_BLOCKS;

		if (log_block_nr >= NR_INDIRECT_BLOCKS) {
			/* double-indirect range: log_block_nr now
			 * ranges from 0 to 4194303 */
			log_block_nr -= NR_INDIRECT_BLOCKS;

			if (in->in.i_dindirect > 0) {
				/* index into the double-indirect block, 0 to 2047 */
				int level1_block = DIVROUNDUP(log_block_nr + 1, 2048) - 1;
				/* index into the second-level indirect block */
				int level2_block = log_block_nr % 2048;

				read_blocks(in->sb, block, in->in.i_dindirect, 1);
				if (((int *)block)[level1_block] != 0) {
					read_blocks(in->sb, block,
						    ((int *)block)[level1_block], 1);
					if (((int *)block)[level2_block] != 0) {
						phy_block_nr = ((int *)block)[level2_block];
						read_blocks(in->sb, block, phy_block_nr, 1);
						return phy_block_nr;
					}
				}
			}
			/* sparse hole: the reads above may have clobbered
			 * block, so zero it before returning */
			bzero(block, BLOCK_SIZE);
			return 0;
		}

		if (in->in.i_indirect > 0) {
			read_blocks(in->sb, block, in->in.i_indirect, 1);
			phy_block_nr = ((int *)block)[log_block_nr];
		}
	}

	if (phy_block_nr > 0) {
		read_blocks(in->sb, block, phy_block_nr, 1);
	} else {
		/* we support sparse files by zeroing out a block that is not
		 * allocated on disk. */
		bzero(block, BLOCK_SIZE);
	}
	return phy_block_nr;
}
Example #14
/*
 * Check that a particular heap page (the one managed by the argument
 * PR) is valid.
 *
 * This checks:
 *    - that the page is within MIPS_KSEG0 (for mips)
 *    - that the freelist starting point in PR is valid
 *    - that the number of free blocks is consistent with the freelist
 *    - that each freelist next pointer points within the page
 *    - that no freelist pointer points to the middle of a block
 *    - that free blocks are still deadbeefed (if CHECKBEEF)
 *    - that the freelist is not circular
 *    - that the guard bands are intact on all allocated blocks (if
 *      CHECKGUARDS)
 *
 * Note that if CHECKGUARDS is set, a circular freelist will cause an
 * assertion as a bit in isfree is set twice; if not, a circular
 * freelist will cause an infinite loop.
 */
static
void
checksubpage(struct pageref *pr)
{
	vaddr_t prpage, fla;
	struct freelist *fl;
	int blktype;
	int nfree=0;
	size_t blocksize;
#ifdef CHECKGUARDS
	const unsigned maxblocks = PAGE_SIZE / SMALLEST_SUBPAGE_SIZE;
	const unsigned numfreewords = DIVROUNDUP(maxblocks, 32);
	uint32_t isfree[numfreewords], mask;
	unsigned numblocks, blocknum, i;
	size_t smallerblocksize;
#endif

	KASSERT(spinlock_do_i_hold(&kmalloc_spinlock));

	if (pr->freelist_offset == INVALID_OFFSET) {
		KASSERT(pr->nfree==0);
		return;
	}

	prpage = PR_PAGEADDR(pr);
	blktype = PR_BLOCKTYPE(pr);
	KASSERT(blktype >= 0 && blktype < NSIZES);
	blocksize = sizes[blktype];

#ifdef CHECKGUARDS
	smallerblocksize = blktype > 0 ? sizes[blktype - 1] : 0;
	for (i=0; i<numfreewords; i++) {
		isfree[i] = 0;
	}
#endif

#ifdef __mips__
	KASSERT(prpage >= MIPS_KSEG0);
	KASSERT(prpage < MIPS_KSEG1);
#endif

	KASSERT(pr->freelist_offset < PAGE_SIZE);
	KASSERT(pr->freelist_offset % blocksize == 0);

	fla = prpage + pr->freelist_offset;
	fl = (struct freelist *)fla;

	for (; fl != NULL; fl = fl->next) {
		fla = (vaddr_t)fl;
		KASSERT(fla >= prpage && fla < prpage + PAGE_SIZE);
		KASSERT((fla-prpage) % blocksize == 0);
#ifdef CHECKBEEF
		checkdeadbeef(fl, blocksize);
#endif
#ifdef CHECKGUARDS
		blocknum = (fla-prpage) / blocksize;
		mask = 1U << (blocknum % 32);
		KASSERT((isfree[blocknum / 32] & mask) == 0);
		isfree[blocknum / 32] |= mask;
#endif
		KASSERT(fl->next != fl);
		nfree++;
	}
	KASSERT(nfree==pr->nfree);

#ifdef CHECKGUARDS
	numblocks = PAGE_SIZE / blocksize;
	for (i=0; i<numblocks; i++) {
		mask = 1U << (i % 32);
		if ((isfree[i / 32] & mask) == 0) {
			checkguardband(prpage + i * blocksize,
				       smallerblocksize, blocksize);
		}
	}
#endif
}
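For scale, assuming PAGE_SIZE = 4096 and SMALLEST_SUBPAGE_SIZE = 16: maxblocks = 256, so the isfree vector is DIVROUNDUP(256, 32) = 8 words, regardless of the actual block size of the page being checked.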