/*
 * Set up the buffer cache at boot: empty every hash chain, reset the
 * free list, and pull in an initial batch of BLOCK_SIZE buffers.
 * nr_buffers ends up one less than the true buffer count, as a sop to
 * backwards compatibility --- the old code did this (I think
 * unintentionally, but I'm not sure), and programs in the ps package
 * expect it.  - TYT 8/30/92
 */
void buffer_init(void)
{
	int slot = 0;

	/* Start with every hash chain empty. */
	while (slot < NR_HASH)
		hash_table[slot++] = NULL;

	free_list = 0;
	grow_buffers(BLOCK_SIZE);

	/* If grow_buffers() produced nothing we cannot run at all. */
	if (!free_list)
		panic("Unable to initialize buffer free list!");
}
/* * This initializes the initial buffer free list. nr_buffers is set * to one less the actual number of buffers, as a sop to backwards * compatibility --- the old code did this (I think unintentionally, * but I'm not sure), and programs in the ps package expect it. * - TYT 8/30/92 */ void buffer_init(void) { int i; if (high_memory >= 4*1024*1024) min_free_pages = 200; else min_free_pages = 20; for (i = 0 ; i < NR_HASH ; i++) hash_table[i] = NULL; free_list = 0; grow_buffers(GFP_KERNEL, BLOCK_SIZE); if (!free_list) panic("VFS: Unable to initialize buffer free list!"); return; }
/*
 * getblk - look up, or allocate, the buffer for (dev, block) with the
 * requested block size.
 *
 * On success the buffer is returned with b_count = 1 and re-hashed
 * under the new (dev, block) key.  This function may sleep (sleep_on,
 * wait_on_buffer), so after every potentially blocking step the state
 * of the chosen buffer is re-validated and we jump back to "repeat"
 * if anything changed underneath us.
 */
struct buffer_head * getblk(int dev, int block, int size)
{
	struct buffer_head * bh, * tmp;
	int buffers;

repeat:
	/* Fast path: the block is already in the cache. */
	if (bh = get_hash_table(dev, block, size))
		return bh;
	/* Plenty of memory free: grow the pool rather than recycle.
	 * NOTE(review): 30 looks like an ad-hoc low-water mark; confirm. */
	if (nr_free_pages > 30)
		grow_buffers(size);
	/* Scan the entire free list once for the least "bad" unused
	 * buffer of the right size (BADNESS presumably ranks reclaim
	 * cost - verify its definition). */
	buffers = nr_buffers;
	bh = NULL;
	for (tmp = free_list; buffers-- > 0 ; tmp = tmp->b_next_free) {
		if (tmp->b_count || tmp->b_size != size)
			continue;
		if (!bh || BADNESS(tmp)<BADNESS(bh)) {
			bh = tmp;
			if (!BADNESS(tmp))
				break;	/* ideal candidate - stop early */
		}
#if 0
		if (tmp->b_dirt)
			ll_rw_block(WRITEA,tmp);
#endif
	}
	/* Nothing reusable found: try growing the pool, then rescan. */
	if (!bh && nr_free_pages > 5) {
		grow_buffers(size);
		goto repeat;
	}
/* and repeat until we find something good */
	if (!bh) {
		/* Out of options: sleep until someone frees a buffer. */
		sleep_on(&buffer_wait);
		goto repeat;
	}
	/* Wait out pending I/O; the buffer may have been grabbed or
	 * resized while we slept, so re-check and retry if needed. */
	wait_on_buffer(bh);
	if (bh->b_count || bh->b_size != size)
		goto repeat;
	/* Dirty candidate: flush its device and start over instead of
	 * discarding unwritten data. */
	if (bh->b_dirt) {
		sync_buffers(bh->b_dev);
		goto repeat;
	}
/* NOTE!! While we slept waiting for this block, somebody else might */
/* already have added "this" block to the cache. check it */
	if (find_buffer(dev,block,size))
		goto repeat;
/* OK, FINALLY we know that this buffer is the only one of it's kind, */
/* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
	bh->b_count=1;
	bh->b_dirt=0;
	bh->b_uptodate=0;
	/* Move the buffer to its new hash position under (dev, block). */
	remove_from_queues(bh);
	bh->b_dev=dev;
	bh->b_blocknr=block;
	insert_into_queues(bh);
	return bh;
}
/*
 * getblk - look up, or allocate, the buffer for (dev, block) with the
 * requested block size.
 *
 * On success the buffer is returned with b_count = 1 and re-hashed
 * under the new (dev, block) key.  This function may sleep (sleep_on,
 * wait_on_buffer), so after every potentially blocking step the state
 * of the chosen buffer is re-validated and we jump back to "repeat"
 * if anything changed underneath us.  The static grow_size acts as a
 * throttle: after a successful grow_buffers() we consume roughly one
 * page's worth of requests before growing again.
 */
struct buffer_head * getblk(dev_t dev, int block, int size)
{
	struct buffer_head * bh, * tmp;
	int buffers;
	static int grow_size = 0;	/* bytes left before next pool growth */

repeat:
	/* Fast path: the block is already in the cache.  A clean,
	 * up-to-date hit is also moved to the tail of the free list
	 * (put_last_free) - NOTE(review): presumably LRU maintenance. */
	bh = get_hash_table(dev, block, size);
	if (bh) {
		if (bh->b_uptodate && !bh->b_dirt)
			put_last_free(bh);
		return bh;
	}
	/* Charge this request against the growth budget; grow the pool
	 * only when memory is above the low-water mark AND the budget
	 * has run out. */
	grow_size -= size;
	if (nr_free_pages > min_free_pages && grow_size <= 0) {
		if (grow_buffers(GFP_BUFFER, size))
			grow_size = PAGE_SIZE;
	}
	/* Scan the entire free list once for the least "bad" unused
	 * buffer of the right size (BADNESS presumably ranks reclaim
	 * cost - verify its definition). */
	buffers = nr_buffers;
	bh = NULL;
	for (tmp = free_list; buffers-- > 0 ; tmp = tmp->b_next_free) {
		if (tmp->b_count || tmp->b_size != size)
			continue;
		/* Skip buffers whose page is shared (mem_map count != 1);
		 * reclaiming those would affect another user of the page. */
		if (mem_map[MAP_NR((unsigned long) tmp->b_data)] != 1)
			continue;
		if (!bh || BADNESS(tmp)<BADNESS(bh)) {
			bh = tmp;
			if (!BADNESS(tmp))
				break;	/* ideal candidate - stop early */
		}
#if 0
		if (tmp->b_dirt) {
			tmp->b_count++;
			ll_rw_block(WRITEA, 1, &tmp);
			tmp->b_count--;
		}
#endif
	}
	/* Nothing reusable: try a normal grow, then an atomic grow as a
	 * last resort, and only sleep if even that fails. */
	if (!bh) {
		if (nr_free_pages > 5)
			if (grow_buffers(GFP_BUFFER, size))
				goto repeat;
		if (!grow_buffers(GFP_ATOMIC, size))
			sleep_on(&buffer_wait);
		goto repeat;
	}
	/* Wait out pending I/O; the buffer may have been grabbed or
	 * resized while we slept, so re-check and retry if needed. */
	wait_on_buffer(bh);
	if (bh->b_count || bh->b_size != size)
		goto repeat;
	/* Dirty candidate: flush everything (sync_buffers(0,0)) and
	 * start over instead of discarding unwritten data. */
	if (bh->b_dirt) {
		sync_buffers(0,0);
		goto repeat;
	}
/* NOTE!! While we slept waiting for this block, somebody else might */
/* already have added "this" block to the cache. check it */
	if (find_buffer(dev,block,size))
		goto repeat;
/* OK, FINALLY we know that this buffer is the only one of its kind, */
/* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
	bh->b_count=1;
	bh->b_dirt=0;
	bh->b_uptodate=0;
	bh->b_req=0;
	/* Move the buffer to its new hash position under (dev, block). */
	remove_from_queues(bh);
	bh->b_dev=dev;
	bh->b_blocknr=block;
	insert_into_queues(bh);
	return bh;
}