Example No. 1
bool Index::check_file_existence(uint32_t dir_hash, uint32_t filename_hash) const
{
    auto dir_it = get_hash_table().find(dir_hash);
    if (dir_it != get_hash_table().end())
    {
        return (dir_it->second.find(filename_hash) != dir_it->second.end());
    }
    return false;
}
Example No. 2
const Index::DirHashTable& Index::get_dir_hash_table(uint32_t dir_hash) const
{
    auto dir_it = get_hash_table().find(dir_hash);
    if (dir_it == get_hash_table().end())
    {
        throw std::runtime_error("dir_hash not found");
    }
    else
    {
        return dir_it->second;
    }
}
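For readers looking at these two helpers out of context, here is a minimal, self-contained C++ sketch of how the two-level table they walk might be laid out. The DirHashTable and HashTable aliases, the add() helper, and the main() driver are illustrative assumptions, not code taken from the project:

#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <unordered_map>
#include <unordered_set>

// Assumed layout: the outer table maps a directory hash to the set of filename hashes it contains.
class Index {
public:
    using DirHashTable = std::unordered_set<uint32_t>;
    using HashTable = std::unordered_map<uint32_t, DirHashTable>;

    void add(uint32_t dir_hash, uint32_t filename_hash) { table_[dir_hash].insert(filename_hash); }

    bool check_file_existence(uint32_t dir_hash, uint32_t filename_hash) const
    {
        auto dir_it = get_hash_table().find(dir_hash);
        if (dir_it != get_hash_table().end())
            return dir_it->second.find(filename_hash) != dir_it->second.end();
        return false;
    }

    const DirHashTable& get_dir_hash_table(uint32_t dir_hash) const
    {
        auto dir_it = get_hash_table().find(dir_hash);
        if (dir_it == get_hash_table().end())
            throw std::runtime_error("dir_hash not found");
        return dir_it->second;
    }

private:
    const HashTable& get_hash_table() const { return table_; }
    HashTable table_;
};

int main()
{
    Index idx;
    idx.add(0x1234u, 0xabcdu);
    std::cout << idx.check_file_existence(0x1234u, 0xabcdu) << '\n';  // prints 1
    std::cout << idx.check_file_existence(0x1234u, 0xffffu) << '\n';  // prints 0
    std::cout << idx.get_dir_hash_table(0x1234u).size() << '\n';      // prints 1
}

With such a layout, check_file_existence() is two hash lookups and never throws, while get_dir_hash_table() reports a missing directory by throwing, as in Example No. 2.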
Example No. 3
static int trunc_direct(struct inode * inode)
{
	struct super_block * sb;
	unsigned int i;
	unsigned long * p;
	unsigned long block;
	struct buffer_head * bh;
	int retry = 0;

	sb = inode->i_sb;
repeat:
	for (i = ((unsigned long) inode->i_size + BLOCK_SIZE-1) / BLOCK_SIZE; i < 10; i++) {
		p = inode->u.sysv_i.i_data + i;
		block = *p;
		if (!block)
			continue;
		bh = get_hash_table(inode->i_dev,block+sb->sv_block_base,BLOCK_SIZE);
		if (i*BLOCK_SIZE < inode->i_size) {
			brelse(bh);
			goto repeat;
		}
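		/* somebody else still holds this buffer, or the block pointer changed under us: retry this one on a later pass */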
		if ((bh && bh->b_count != 1) || (block != *p)) {
			retry = 1;
			brelse(bh);
			continue;
		}
		*p = 0;
		inode->i_dirt = 1;
		brelse(bh);
		sysv_free_block(sb,block);
	}
	return retry;
}
Example No. 4
static int sync_block (struct inode * inode, u32 * block, int wait)
{
	struct buffer_head * bh;
	int tmp;
	
	if (!*block)
		return 0;
	tmp = *block;
	bh = get_hash_table (inode->i_dev, *block, blocksize);
	if (!bh)
		return 0;
	if (*block != tmp) {
		brelse (bh);
		return 1;
	}
	if (wait && buffer_req(bh) && !buffer_uptodate(bh)) {
		brelse (bh);
		return -1;
	}
	if (wait || !buffer_uptodate(bh) || !buffer_dirty(bh)) {
		brelse (bh);
		return 0;
	}
	ll_rw_block (WRITE, 1, &bh);
	bh->b_count--;
	return 0;
}
Example No. 5
Tcl_DString *tcl_krb5_create_object(Tcl_Interp *interp,
				    char *type,
				    ClientData datum)
{
     Tcl_HashTable *table;
     Tcl_DString *handle;
     Tcl_HashEntry *entry;
     int entry_created = 0;

     if (! (table = get_hash_table(interp, type))) {
	  return 0;
     }

     if (! (handle = get_new_handle(interp, type))) {
	  return 0;
     }

     if (! (entry = Tcl_CreateHashEntry(table, handle, &entry_created))) {
	  Tcl_SetResult(interp, "error creating hash entry", TCL_STATIC);
	  Tcl_DStringFree(handle);
	  return 0;
     }

     assert(entry_created);

     Tcl_SetHashValue(entry, datum);

     return handle;
}
Example No. 6
/*
 * The functions for minix V1 fs truncation.
 */
static int V1_trunc_direct(register struct inode *inode)
{
    unsigned short *p;
    register struct buffer_head *bh;
    unsigned short tmp;
    int i;
    int retry = 0;

  repeat:
    for (i = DIRECT_BLOCK; i < 7; i++) {
	p = &inode->i_zone[i];
	if (!(tmp = *p))
	    continue;
	bh = get_hash_table(inode->i_dev, (block_t) tmp);
	if (i < DIRECT_BLOCK) {
	    brelse(bh);
	    goto repeat;
	}
	if ((bh && bh->b_count != 1) || tmp != *p) {
	    retry = 1;
	    brelse(bh);
	    continue;
	}
	*p = 0;
	inode->i_dirt = 1;
	if (bh) {
	    mark_buffer_clean(bh);
	    brelse(bh);
	}
	minix_free_block(inode->i_sb, tmp);
    }
    return retry;
}
Example No. 7
static int V1_trunc_indirect(register struct inode *inode,
			     int offset, unsigned short *p)
{
    struct buffer_head *bh;
    int i;
    unsigned short tmp;
    register struct buffer_head *ind_bh;
    unsigned short *ind;
    int retry = 0;

    tmp = *p;
    if (!tmp) return 0;
    ind_bh = bread(inode->i_dev, (block_t) tmp);
    if (tmp != *p) {
	brelse(ind_bh);
	return 1;
    }
    if (!ind_bh) {
	*p = 0;
	return 0;
    }
    map_buffer(ind_bh);
  repeat:
    for (i = INDIRECT_BLOCK(offset); i < 512; i++) {
	if (i < 0) i = 0;
	else if (i < INDIRECT_BLOCK(offset)) goto repeat;
	ind = i + (unsigned short *) ind_bh->b_data;
	tmp = *ind;
	if (!tmp) continue;
	bh = get_hash_table(inode->i_dev, (block_t) tmp);
	if (i < INDIRECT_BLOCK(offset)) {
	    brelse(bh);
	    goto repeat;
	}
	if ((bh && bh->b_count != 1) || tmp != *ind) {
	    retry = 1;
	    brelse(bh);
	    continue;
	}
	*ind = 0;
	mark_buffer_dirty(ind_bh, 1);
	brelse(bh);
	minix_free_block(inode->i_sb, tmp);
    }
    ind = (unsigned short *) ind_bh->b_data;
    for (i = 0; i < 512; i++)
	if (*(ind++)) break;
    if (i >= 512) {
	if (ind_bh->b_count != 1) retry = 1;
	else {
	    tmp = *p;
	    *p = 0;
	    minix_free_block(inode->i_sb, tmp);
	}
    }
    unmap_brelse(ind_bh);
    return retry;
}
Example No. 8
struct buffer_head * getblk(int dev,int block)
{
	struct buffer_head * bh, * tmp;
	int buffers;

repeat:
	if (bh = get_hash_table(dev,block))
		return bh;
	buffers = NR_BUFFERS;
	tmp = free_list;
	do {
		tmp = tmp->b_next_free;
		if (tmp->b_count)
			continue;
		if (!bh || BADNESS(tmp)<BADNESS(bh)) {
			bh = tmp;
			if (!BADNESS(tmp))
				break;
		}
		if (tmp->b_dirt)
			ll_rw_block(WRITEA,tmp);
/* and repeat until we find something good */
	} while (buffers--);
	if (!bh) {
		sleep_on(&buffer_wait);
		goto repeat;
	}
	wait_on_buffer(bh);
	if (bh->b_count)
		goto repeat;
	while (bh->b_dirt) {
		sync_dev(bh->b_dev);
		wait_on_buffer(bh);
		if (bh->b_count)
			goto repeat;
	}
/* NOTE!! While we slept waiting for this block, somebody else might */
/* already have added "this" block to the cache. check it */
	if (find_buffer(dev,block))
		goto repeat;
/* OK, FINALLY we know that this buffer is the only one of it's kind, */
/* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
	bh->b_count=1;
	bh->b_dirt=0;
	bh->b_uptodate=0;
	remove_from_queues(bh);
	bh->b_dev=dev;
	bh->b_blocknr=block;
	insert_into_queues(bh);
	return bh;
}
Example No. 9
/*
 * Ok, this is getblk, and it isn't very clear, again to hinder
 * race-conditions. Most of the code is seldom used, (ie repeating),
 * so it should be much more efficient than it looks.
 */
struct buffer_head * getblk(int dev,int block)
{
	struct buffer_head * tmp;

repeat:
	if ((tmp=get_hash_table(dev,block)))
		return tmp;
	tmp = free_list;
	do {
		if (!tmp->b_count) {
			wait_on_buffer(tmp);	/* we still have to wait */
			if (!tmp->b_count)	/* on it, it might be dirty */
				break;
		}
		tmp = tmp->b_next_free;
	} while (tmp != free_list || (tmp=NULL));
	/* Kids, don't try THIS at home ^^^^^. Magic */
	if (!tmp) {
		printk("Sleeping on free buffer ..");
		sleep_on(&buffer_wait);
		printk("ok\n");
		goto repeat;
	}
	tmp->b_count++;
	remove_from_queues(tmp);
/*
 * Now, when we know nobody can get to this node (as it's removed from the
 * free list), we write it out. We can sleep here without fear of race-
 * conditions.
 */
	if (tmp->b_dirt)
		sync_dev(tmp->b_dev);
/* update buffer contents */
	tmp->b_dev=dev;
	tmp->b_blocknr=block;
	tmp->b_dirt=0;
	tmp->b_uptodate=0;
/* NOTE!! While we possibly slept in sync_dev(), somebody else might have
 * added "this" block already, so check for that. Thank God for goto's.
 */
	if (find_buffer(dev,block)) {
		tmp->b_dev=0;		/* ok, someone else has beaten us */
		tmp->b_blocknr=0;	/* to it - free this block and */
		tmp->b_count=0;		/* try again */
		insert_into_queues(tmp);
		goto repeat;
	}
/* and then insert into correct position */
	insert_into_queues(tmp);
	return tmp;
}
Example No. 10
/*
 * Try-to-share-buffers tries to minimize memory use by trying to keep
 * both code pages and the buffer area in the same page. This is done by
 * (a) checking if the buffers are already aligned correctly in memory and
 * (b) if none of the buffer heads are in memory at all, trying to load
 * them into memory the way we want them.
 *
 * This doesn't guarantee that the memory is shared, but should under most
 * circumstances work very well indeed (ie >90% sharing of code pages on
 * demand-loadable executables).
 */
static inline unsigned long try_to_share_buffers(unsigned long address,
	dev_t dev, int *b, int size)
{
	struct buffer_head * bh;
	int block;

	block = b[0];
	if (!block)
		return 0;
	bh = get_hash_table(dev, block, size);
	if (bh)
		return check_aligned(bh, address, dev, b, size);
	return try_to_load_aligned(address, dev, b, size);
}
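The check_aligned() helper called on the cache-hit path here is the function shown in Example No. 14 below.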
Example No. 11
void zz_manager::for_each (zz_device_objects_callback callback)
{
	zz_hash_table<zz_node*>::iterator it, it_end;
	zz_hash_table<zz_node*> * nodes = get_hash_table(); // All children nodes could be accessed via name hash table.

	if (!nodes) {
		return;
	}

	//ZZ_LOG("manager: for_each(%x)", nodes);
	//ZZ_LOG("(%s)-%d\n", get_name(), nodes->size());

	for (it = nodes->begin(), it_end = nodes->end(); it != it_end; ++it) {
		callback(*it);
	}
}
Example No. 12
void ext_free_block(struct super_block * sb, int block)
{
	struct buffer_head * bh;
	struct ext_free_block * efb;

	if (!sb) {
		printk("trying to free block on non-existent device\n");
		return;
	}
	lock_super (sb);
	if (block < sb->u.ext_sb.s_firstdatazone ||
	    block >= sb->u.ext_sb.s_nzones) {
		printk("trying to free block not in datazone\n");
		return;
	}
	bh = get_hash_table(sb->s_dev, block, sb->s_blocksize);
	if (bh)
		mark_buffer_clean(bh);
	brelse(bh);
	if (sb->u.ext_sb.s_firstfreeblock)
		efb = (struct ext_free_block *) sb->u.ext_sb.s_firstfreeblock->b_data;
	if (!sb->u.ext_sb.s_firstfreeblock || efb->count == 254) {
#ifdef EXTFS_DEBUG
printk("ext_free_block: block full, skipping to %d\n", block);
#endif
		if (sb->u.ext_sb.s_firstfreeblock)
			brelse (sb->u.ext_sb.s_firstfreeblock);
		if (!(sb->u.ext_sb.s_firstfreeblock = bread (sb->s_dev,
			block, sb->s_blocksize)))
			panic ("ext_free_block: unable to read block to free\n");
		efb = (struct ext_free_block *) sb->u.ext_sb.s_firstfreeblock->b_data;
		efb->next = sb->u.ext_sb.s_firstfreeblocknumber;
		efb->count = 0;
		sb->u.ext_sb.s_firstfreeblocknumber = block;
	} else {
		efb->free[efb->count++] = block;
	}
	sb->u.ext_sb.s_freeblockscount ++;
	sb->s_dirt = 1;
	mark_buffer_dirty(sb->u.ext_sb.s_firstfreeblock, 1);
	unlock_super (sb);
	return;
}
Example No. 13
File: buffer.c Project: foolsh/elks
struct buffer_head *getblk(kdev_t dev, block_t block)
{
    register struct buffer_head *bh;


    /* If there are too many dirty buffers, we wake up the update process
     * now so as to ensure that there are still clean buffers available
     * for user processes to use (and dirty) */

    do {
	bh = get_hash_table(dev, block);
	if (bh != NULL) {
	    if (buffer_clean(bh) && buffer_uptodate(bh))
		put_last_lru(bh);
	    return bh;
	}

	/* I think the following check is redundant
	 * So I will remove it for now
	 */

    } while(find_buffer(dev, block));

    /*
     *      Create a buffer for this job.
     */
    bh = get_free_buffer();

/* OK, FINALLY we know that this buffer is the only one of its kind,
 * and that it's unused (b_count=0), unlocked (buffer_locked=0), and clean
 */

    bh->b_count = 1;
    bh->b_dirty = 0;
    bh->b_lock = 0;
    bh->b_uptodate = 0;
    bh->b_dev = dev;
    bh->b_blocknr = block;
    bh->b_seg = kernel_ds;

    return bh;
}
Example No. 14
static unsigned long check_aligned(struct buffer_head * first, unsigned long address,
	dev_t dev, int *b, int size)
{
	struct buffer_head * bh[8];
	unsigned long page;
	unsigned long offset;
	int block;
	int nrbuf;

	page = (unsigned long) first->b_data;
	if (page & ~PAGE_MASK) {
		brelse(first);
		return 0;
	}
	mem_map[MAP_NR(page)]++;
	bh[0] = first;
	nrbuf = 1;
	for (offset = size ; offset < PAGE_SIZE ; offset += size) {
		block = *++b;
		if (!block)
			goto no_go;
		first = get_hash_table(dev, block, size);
		if (!first)
			goto no_go;
		bh[nrbuf++] = first;
		if (page+offset != (unsigned long) first->b_data)
			goto no_go;
	}
	read_buffers(bh,nrbuf);		/* make sure they are actually read correctly */
	while (nrbuf-- > 0)
		brelse(bh[nrbuf]);
	free_page(address);
	++current->min_flt;
	return page;
no_go:
	while (nrbuf-- > 0)
		brelse(bh[nrbuf]);
	free_page(page);
	return 0;
}
Example No. 15
ClientData tcl_krb5_get_object(Tcl_Interp *interp,
			       char *handle)
{
     char *myhandle, *id_ptr;
     Tcl_HashTable *table;
     Tcl_HashEntry *entry;

     if (! (myhandle = strdup(handle))) {
	  Tcl_SetResult(interp, memory_error, TCL_STATIC);
	  return 0;
     }

     if (! (id_ptr = index(myhandle, *SEP_STR))) {
	  free(myhandle);
	  Tcl_ResetResult(interp);
	  Tcl_AppendResult(interp, "malformatted handle \"", handle,
			   "\"", 0);
	  return 0;
     }

     *id_ptr = '\0';
     
     if (! (table = get_hash_table(interp, myhandle))) {
	  free(myhandle);
	  return 0;
     }

     free(myhandle);

     if (! (entry = Tcl_FindHashEntry(table, handle))) {
	  Tcl_ResetResult(interp);
	  Tcl_AppendResult(interp, "no object corresponding to handle \"",
			   handle, "\"", 0);
	  return 0;
     }

     return(Tcl_GetHashValue(entry));
}
Example No. 16
// unbind object textures to change loading scheme.
// this method will be called to change texture loading quality level.
bool zz_manager_texture::unbind_object_textures ()
{
	zz_hash_table<zz_node*>::iterator it, it_end;
	zz_hash_table<zz_node*> * nodes = get_hash_table();
	zz_texture * tex;

	if (!nodes) return true;

	for (it = nodes->begin(), it_end = nodes->end(); it != it_end; ++it) {
		tex = static_cast<zz_texture*>(*it);

		if (tex->get_for_image()) { // no need to unbind sprite image textures, since their reloading scheme is not changed
			continue;
		}

		if (tex->get_lock_texture()) // skip if the texture is locked
			continue;

		tex->unbind_device();
	}

	return true;
}
Example No. 17
void filter_by_id(const char* fn, hash_table* T)
{
    fprintf(stderr, "filtering ... \n");

    samfile_t* fin = samopen(fn, "rb", NULL);
    if (fin == NULL) {
        fprintf(stderr, "can't open bam file %s\n", fn);
        exit(1);
    }

    samfile_t* fout = samopen("-", "w", (void*)fin->header);
    if (fout == NULL) {
        fprintf(stderr, "can't open stdout, for some reason.\n");
        exit(1);
    }

    fputs(fin->header->text, stdout);

    bam1_t* b = bam_init1();
    uint32_t n = 0;

    while (samread(fin, b) >= 0) {
        if (++n % 1000000 == 0) {
            fprintf(stderr, "\t%d reads\n", n);
        }

        if (get_hash_table(T, bam1_qname(b), b->core.l_qname) == 1) {
            samwrite(fout, b);
        }
    }

    bam_destroy1(b);
    samclose(fout);
    samclose(fin);

    fprintf(stderr, "done.\n");
}
Example No. 18
//// Get the requested buffer block from the buffer cache.
// Check whether the requested block is already in the cache; if not, a corresponding new entry has to be set up.
// Returns a pointer to the corresponding buffer head.
struct buffer_head * getblk(int dev,int block)
{
	struct buffer_head * tmp, * bh;

repeat:
	// Search the hash table; if the block is already in the cache, return its buffer head and exit.
	if (bh = get_hash_table(dev,block))
		return bh;
// Scan the free list for an unused buffer.
// Start with tmp pointing at the first buffer head on the free list.
	tmp = free_list;
	do {
// If the buffer is in use (reference count non-zero), go on to the next one.
		if (tmp->b_count)
			continue;
// If bh is still NULL, or tmp's badness (dirty/locked weight) is lower than bh's, let bh point to tmp.
// If tmp is neither dirty nor locked, a suitable cache buffer for the requested block has been found,
// so leave the loop.
		if (!bh || BADNESS(tmp)<BADNESS(bh)) {
			bh = tmp;
			if (!BADNESS(tmp))
				break;
		}
/* and repeat until we find something good */
	} while ((tmp = tmp->b_next_free) != free_list);
// If every buffer is in use (all buffer heads have a reference count > 0),
// sleep until a free buffer becomes available.
	if (!bh) {
		sleep_on(&buffer_wait);
		goto repeat;
	}
	// Wait for the buffer to be unlocked (if it is locked).
	wait_on_buffer(bh);
	// If another task has grabbed the buffer in the meantime, repeat the whole procedure.
	if (bh->b_count)
		goto repeat;
// If the buffer is dirty, write the data to disk and wait again for the buffer to be unlocked.
// If another task grabs it while we wait, repeat the whole procedure once more.
	while (bh->b_dirt) {
		sync_dev(bh->b_dev);
		wait_on_buffer(bh);
		if (bh->b_count)
			goto repeat;
	}
/* NOTE!! While we slept waiting for this block, somebody else might */
/* already have added "this" block to the cache, so check for that. */
// Check the cache hash table to see whether a buffer for the given device and block has been
// added in the meantime; if so, repeat the whole procedure.
	if (find_buffer(dev,block))
		goto repeat;
/* OK, FINALLY we know that this buffer is the only one of its kind, */
/* and that it's unused (b_count=0), unlocked (b_lock=0), and clean. */
// So claim this buffer: set the reference count to 1 and clear the dirty and uptodate flags.
	bh->b_count=1;
	bh->b_dirt=0;
	bh->b_uptodate=0;
// Remove the buffer head from the hash queue and the free list, and dedicate it to the given device and block.
	remove_from_queues(bh);
	bh->b_dev=dev;
	bh->b_blocknr=block;
// Then reinsert it into the free list and hash queues at its new position according to the new device and block number, and finally return the buffer head pointer.
	insert_into_queues(bh);
	return bh;
}
Example No. 19
struct buffer_head * getblk(dev_t dev, int block, int size)
{
	struct buffer_head * bh, * tmp;
	int buffers;
	static int grow_size = 0;

repeat:
	bh = get_hash_table(dev, block, size);
	if (bh) {
		if (bh->b_uptodate && !bh->b_dirt)
			put_last_free(bh);
		return bh;
	}
	grow_size -= size;
	if (nr_free_pages > min_free_pages && grow_size <= 0) {
		if (grow_buffers(GFP_BUFFER, size))
			grow_size = PAGE_SIZE;
	}
	buffers = nr_buffers;
	bh = NULL;

	for (tmp = free_list; buffers-- > 0 ; tmp = tmp->b_next_free) {
		if (tmp->b_count || tmp->b_size != size)
			continue;
		if (mem_map[MAP_NR((unsigned long) tmp->b_data)] != 1)
			continue;
		if (!bh || BADNESS(tmp)<BADNESS(bh)) {
			bh = tmp;
			if (!BADNESS(tmp))
				break;
		}
#if 0
		if (tmp->b_dirt) {
			tmp->b_count++;
			ll_rw_block(WRITEA, 1, &tmp);
			tmp->b_count--;
		}
#endif
	}

	if (!bh) {
		if (nr_free_pages > 5)
			if (grow_buffers(GFP_BUFFER, size))
				goto repeat;
		if (!grow_buffers(GFP_ATOMIC, size))
			sleep_on(&buffer_wait);
		goto repeat;
	}

	wait_on_buffer(bh);
	if (bh->b_count || bh->b_size != size)
		goto repeat;
	if (bh->b_dirt) {
		sync_buffers(0,0);
		goto repeat;
	}
/* NOTE!! While we slept waiting for this block, somebody else might */
/* already have added "this" block to the cache. check it */
	if (find_buffer(dev,block,size))
		goto repeat;
/* OK, FINALLY we know that this buffer is the only one of its kind, */
/* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
	bh->b_count=1;
	bh->b_dirt=0;
	bh->b_uptodate=0;
	bh->b_req=0;
	remove_from_queues(bh);
	bh->b_dev=dev;
	bh->b_blocknr=block;
	insert_into_queues(bh);
	return bh;
}
Example No. 20
struct buffer_head * getblk(int dev, int block, int size)
{
	struct buffer_head * bh, * tmp;
	int buffers;

repeat:
	if (bh = get_hash_table(dev, block, size))
		return bh;

	if (nr_free_pages > 30)
		grow_buffers(size);

	buffers = nr_buffers;
	bh = NULL;

	for (tmp = free_list; buffers-- > 0 ; tmp = tmp->b_next_free) {
		if (tmp->b_count || tmp->b_size != size)
			continue;
		if (!bh || BADNESS(tmp)<BADNESS(bh)) {
			bh = tmp;
			if (!BADNESS(tmp))
				break;
		}
#if 0
		if (tmp->b_dirt)
			ll_rw_block(WRITEA,tmp);
#endif
	}

	if (!bh && nr_free_pages > 5) {
		grow_buffers(size);
		goto repeat;
	}
	
/* and repeat until we find something good */
	if (!bh) {
		sleep_on(&buffer_wait);
		goto repeat;
	}
	wait_on_buffer(bh);
	if (bh->b_count || bh->b_size != size)
		goto repeat;
	if (bh->b_dirt) {
		sync_buffers(bh->b_dev);
		goto repeat;
	}
/* NOTE!! While we slept waiting for this block, somebody else might */
/* already have added "this" block to the cache. check it */
	if (find_buffer(dev,block,size))
		goto repeat;
/* OK, FINALLY we know that this buffer is the only one of it's kind, */
/* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
	bh->b_count=1;
	bh->b_dirt=0;
	bh->b_uptodate=0;
	remove_from_queues(bh);
	bh->b_dev=dev;
	bh->b_blocknr=block;
	insert_into_queues(bh);
	return bh;
}
Example No. 21
static int trunc_indirect(struct inode * inode, unsigned long offset, unsigned long * p, int convert, unsigned char * dirt)
{
	unsigned long indtmp, indblock;
	struct super_block * sb;
	struct buffer_head * indbh;
	unsigned int i;
	sysv_zone_t * ind;
	unsigned long tmp, block;
	struct buffer_head * bh;
	int retry = 0;

	indblock = indtmp = *p;
	if (convert)
		indblock = from_coh_ulong(indblock);
	if (!indblock)
		return 0;
	sb = inode->i_sb;
	indbh = bread(inode->i_dev,indblock+sb->sv_block_base,BLOCK_SIZE);
	if (indtmp != *p) {
		brelse(indbh);
		return 1;
	}
	if (!indbh) {
		*p = 0;
		*dirt = 1;
		return 0;
	}
repeat:
	if (inode->i_size < offset)
		i = 0;
	else
		i = (inode->i_size - offset + BLOCK_SIZE-1) / BLOCK_SIZE;
	for (; i < IND_PER_BLOCK; i++) {
		ind = ((sysv_zone_t *) indbh->b_data) + i;
		block = tmp = *ind;
		if (sb->sv_convert)
			block = from_coh_ulong(block);
		if (!block)
			continue;
		bh = get_hash_table(inode->i_dev,block+sb->sv_block_base,BLOCK_SIZE);
		if (i*BLOCK_SIZE + offset < inode->i_size) {
			brelse(bh);
			goto repeat;
		}
		if ((bh && bh->b_count != 1) || (tmp != *ind)) {
			retry = 1;
			brelse(bh);
			continue;
		}
		*ind = 0;
		mark_buffer_dirty(indbh, 1);
		brelse(bh);
		sysv_free_block(sb,block);
	}
	for (i = 0; i < IND_PER_BLOCK; i++)
		if (((sysv_zone_t *) indbh->b_data)[i])
			goto done;
	if ((indbh->b_count != 1) || (indtmp != *p)) {
		brelse(indbh);
		return 1;
	}
	*p = 0;
	*dirt = 1;
	sysv_free_block(sb,indblock);
done:
	brelse(indbh);
	return retry;
}
Example No. 22
//// Get the requested buffer block from the buffer cache.
// Check whether the requested block is already in the cache; if not, a corresponding new entry has to be set up.
// Returns a pointer to the corresponding buffer head.
struct buffer_head *
getblk( int dev, int block )
{
	struct buffer_head *tmp, *bh;

repeat:
// Search the hash table; if the block is already in the cache, return its buffer head and exit.
	if( bh = get_hash_table( dev, block ) )
	{
		return bh;
	}
// Scan the free list for an unused buffer.
// Start with tmp pointing at the first buffer head on the free list.
	tmp = free_list;
	do
	{
// If the buffer is in use (reference count non-zero), go on to the next one.
		if( tmp->b_count )
		{
			continue;
		}
// If bh is still NULL, or tmp's badness (dirty/locked weight) is lower than bh's, let bh point to tmp.
// If tmp is neither dirty nor locked, a suitable cache buffer for the requested block has been found,
// so leave the loop.
		if( !bh || BADNESS( tmp ) < BADNESS( bh ) )
		{
			bh = tmp;
			if( !BADNESS( tmp ) )
			{
				break;
			}
		}
/* and repeat until we find something good */
	}
	while( ( tmp = tmp->b_next_free ) != free_list );
// If every buffer is in use (all buffer heads have a reference count > 0), sleep until a free buffer becomes available.
	if( !bh )
	{
		sleep_on( &buffer_wait );
		goto repeat;
	}
// Wait for the buffer to be unlocked (if it is locked).
	wait_on_buffer( bh );
// If another task has grabbed the buffer in the meantime, repeat the whole procedure.
	if( bh->b_count )
	{
		goto repeat;
	}
// If the buffer is dirty, write the data to disk and wait again for the buffer to be unlocked.
// If another task grabs it while we wait, repeat the whole procedure once more.
	while( bh->b_dirt )
	{
		sync_dev( bh->b_dev );
		wait_on_buffer( bh );
		if( bh->b_count )
		{
			goto repeat;
		}
	}
/* NOTE!! While we slept waiting for this block, somebody else might */
/* already have added "this" block to the cache. check it */
// Check the hash table to see whether a buffer for this device and block was added while we slept;
// if so, repeat the whole procedure.
	if( find_buffer( dev, block ) )
	{
		goto repeat;
	}
/* OK, FINALLY we know that this buffer is the only one of it's kind, */
/* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
// So claim this buffer: set the reference count to 1 and clear the dirty and uptodate flags.
	bh->b_count	   = 1;
	bh->b_dirt	   = 0;
	bh->b_uptodate = 0;
// Remove the buffer head from the hash queue and the free list, and dedicate it to the given device and block.
	remove_from_queues( bh );
	bh->b_dev	   = dev;
	bh->b_blocknr  = block;
// Then reinsert it into the free list and hash queues at its new position according to the new device and block number, and finally return the buffer head pointer.
	insert_into_queues( bh );
	return bh;
}
Example No. 23
struct buffer_head  * journal_get_hash_table (struct super_block *s, int block)
{
  return get_hash_table (SB_JOURNAL_DEV(s), block, s->s_blocksize );
}
Example No. 24
void filter_by_id(const char* fn, hash_table* T)
{
    fprintf(stderr, "filtering ... \n");

    samfile_t* fin = samopen(fn, "rb", NULL);
    if (fin == NULL) {
        fprintf(stderr, "can't open bam file %s\n", fn);
        exit(1);
    }

    samfile_t* fout = samopen("-", "w", (void*)fin->header);
    if (fout == NULL) {
        fprintf(stderr, "can't open stdout, for some reason.\n");
        exit(1);
    }

    fputs(fin->header->text, stdout);

    bam1_t* b = bam_init1();
    uint32_t n = 0;

    char* qname = NULL;
    size_t qname_size = 0;

    while (samread(fin, b) >= 0) {
        if (++n % 1000000 == 0) {
            fprintf(stderr, "\t%d reads\n", n);
        }


        if (qname_size < b->core.l_qname + 3) {
            qname_size = b->core.l_qname + 3;
            qname = realloc(qname, qname_size);
        }

        memcpy(qname, bam1_qname(b), b->core.l_qname);

        if (b->core.flag & BAM_FREAD2) {
            qname[b->core.l_qname]     = '/';
            qname[b->core.l_qname + 1] = '2';
            qname[b->core.l_qname + 2] = '\0';
        }
        else {
            qname[b->core.l_qname]     = '/';
            qname[b->core.l_qname + 1] = '1';
            qname[b->core.l_qname + 2] = '\0';
        }

        if (get_hash_table(T, qname, b->core.l_qname + 2) == 1) {
            samwrite(fout, b);
        }
    }

    free(qname);

    bam_destroy1(b);
    samclose(fout);
    samclose(fin);

    fprintf(stderr, "done.\n");
}
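Compared with Example No. 17, this variant of filter_by_id() appends a "/1" or "/2" mate suffix to each read name before the get_hash_table() lookup, so the table is keyed by the suffixed name.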
Example No. 25
bool Index::check_dir_existence(uint32_t dir_hash) const
{
    return (get_hash_table().find(dir_hash) != get_hash_table().end());
}
Example No. 26
void zz_manager_texture::unbind_notset ()
{
	zz_hash_table<zz_node*>::iterator it, it_end;
	zz_hash_table<zz_node*> * nodes = get_hash_table();
	zz_texture * tex;

	if (!nodes) return;

	zz_camera * cam = znzin->get_camera();

	if (cam) {
		if (cam->get_speed() > 0) {
			// now, camera is moving, so do not unbind textures
			//ZZ_LOG("manager_texture: camera is moving\n");
			return;
		}
	}

	unsigned int max_tex_mem = znzin->renderer->get_max_texmem() >> 20; // byte -> megabyte
	unsigned int ava_tex_mem = znzin->renderer->get_available_texmem() >> 20;
	const unsigned int suff_tex_mem = 20; // sufficient available texture memory 20 MB
	
#if !defined(UNBIND_ALL)
	if (ava_tex_mem > suff_tex_mem) {
		if ((ava_tex_mem * 3) > max_tex_mem) {
			return;
		}
	}
#endif

	static zz_time s_accum_time = 0; // accumulated time
	const zz_time max_accum_time = ZZ_MSEC_TO_TIME(1000); // default 1000 msec

	s_accum_time += znzin->get_diff_time();

	// we do not unbind in every frame. we do only after some time elapsed
	// and the ratio is (ava_tex_mem / max_tex_mem)
	if (s_accum_time*max_tex_mem < max_accum_time*ava_tex_mem) // s_accum_time < max_accum_time * (ava_tex_mem / max_tex_mem)
		return;
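	// For intuition (the numbers here are hypothetical): with max_tex_mem = 256 MB, ava_tex_mem = 64 MB
	// and max_accum_time = 1000 msec, the unbind pass only runs once s_accum_time reaches the equivalent
	// of 1000 * 64 / 256 = 250 msec, so the scarcer texture memory is, the more often textures are unbound.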

	zz_time current_time = znzin->get_current_time();
	const zz_time MAX_DIFF_TIME = ZZ_MSEC_TO_TIME(10000); // default 10 second

	texlist.clear();

	for (it = nodes->begin(), it_end = nodes->end(); it != it_end; ++it) {
		tex = static_cast<zz_texture*>(*it);

		if (tex->get_lock_texture()) // skip texture that was locked for some reason
			continue;

		if (tex->get_last_settime() == current_time) // skip if it was used in the current frame
			continue;

		if (!tex->get_device_updated()) // skip if it was not device-ready
			continue;

		if (entrance_line.find(tex)) // skip if it is manager-controlled. it will be flushed by manager queue mechanism
			continue;

		if (exit_line.find(tex)) // skip if it is manager-controlled. it will be flushed by manager queue mechanism
			continue;

		texlist.push_back(tex); // insert the texture into the texlist
	}

	if (texlist.empty()) // skip if the texlist is empty
		return;

	texture_settime_compare texture_cf; // declare texture comparison function

	// sort texture list by last_settime
	std::sort(texlist.begin(), texlist.end(), texture_cf);

	tex = *texlist.begin(); // consider only the first
	assert(tex);

	// skip if the texture is recently used.
	if ((current_time - tex->get_last_settime()) < MAX_DIFF_TIME)
		return;

	s_accum_time = 0; // initialize accum_time

	tex->unbind_device();
	tex->update_last_settime();
}
Example No. 27
// Check whether the buffer for the given device and block number is already in the buffer cache.
// If it is, return the corresponding buffer head and exit; if not, a new entry for that device and
// block has to be set up in the cache. Returns a pointer to the corresponding buffer head.
struct buffer_head * getblk(int dev,int block)
{
	struct buffer_head * tmp, * bh;

repeat:
	if ( (bh = get_hash_table(dev,block)) )
		return bh;
	tmp = free_list;
	do {
		if (tmp->b_count)
			continue;
		if (!bh || BADNESS(tmp)<BADNESS(bh)) {
			bh = tmp;
			if (!BADNESS(tmp))
				break;
		}
/* and repeat until we find something good */
	} while ((tmp = tmp->b_next_free) != free_list);
	
	// If the scan found every buffer in use (all buffer heads have a reference count > 0), sleep until
	// a free buffer becomes available. The process is woken explicitly when one is freed, and we then
	// jump back to the start of the function to search for a free buffer again.
	if (!bh) {
		sleep_on(&buffer_wait);
		goto repeat;
	}
	
	// If we get here, a free buffer has been found. First wait for it to be unlocked (if it is locked).
	wait_on_buffer(bh);
	if (bh->b_count)
		goto repeat;
	
	// If the buffer is dirty, write its data to disk and wait again for it to be unlocked.
	while (bh->b_dirt) {
		sync_dev(bh->b_dev);
		wait_on_buffer(bh);
		if (bh->b_count)
			goto repeat;
	}
/* NOTE!! While we slept waiting for this block, somebody else might */
/* already have added "this" block to the cache. check it */
	// While we slept waiting for this buffer, another process may already have added the block to the cache, so check for that as well.
	if (find_buffer(dev,block))
		goto repeat;
/* OK, FINALLY we know that this buffer is the only one of it's kind, */
/* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
	
	// At this point we finally know this buffer is the only one for the given device and block,
	// and that it is currently unused (b_count=0), unlocked (b_lock=0), and clean (b_dirt=0).
	// So claim it: set the reference count to 1 and clear the dirty and uptodate flags.
	bh->b_count=1;
	bh->b_dirt=0;
	bh->b_uptodate=0;
	// Remove the buffer head from the hash queue and free list and dedicate it to the given device and
	// block, then reinsert it into the free list and hash queues at its new position and return it.
	remove_from_queues(bh);
	bh->b_dev=dev;
	bh->b_blocknr=block;
	insert_into_queues(bh);
	return bh;
}