/*
 * __iget - find or read in the in-core inode for (sb, inr).
 *
 * Scans the static inode table for a cached entry; if none is found,
 * reserves an empty slot, rescans (another process may have read the
 * inode in while get_empty_inode() slept), and only then fills the
 * slot and reads the inode from disk.
 *
 * Returns the inode with its reference count raised, or NULL if no
 * empty inode slot could be obtained.  The crossmntp parameter is
 * commented out; mount points are always crossed here.
 */
struct inode *__iget(register struct super_block *sb, ino_t inr /*,int crossmntp */ )
{
	int i;
	register struct inode *inode;
	register struct inode *empty = NULL;

	debug3("iget called(%x, %d, %d)\n", sb, inr, 0 /* crossmntp */ );
	if (!sb)
		panic("VFS: iget with sb==NULL");
repeat:
	/* Linear scan of the whole inode table for a (dev, ino) match. */
	inode = inode_block;
	for (i = NR_INODE; i; i--, inode++) {
		if (inode->i_dev == sb->s_dev && inode->i_ino == inr) {
			goto found_it;
		}
	}
	if (!empty) {
		/*
		 * Not cached.  Reserve a free slot, then rescan: the
		 * allocation may sleep, and someone else could have
		 * read this inode in meanwhile.
		 */
		debug("iget: getting an empty inode...\n");
		empty = get_empty_inode();
		debug1("iget: got one... (%x)!\n", empty);
		if (empty)
			goto repeat;
		return NULL;
	}
	/* Second scan still missed: claim the reserved slot and read it in. */
	inode = empty;
	inode->i_sb = sb;
	inode->i_dev = sb->s_dev;
	inode->i_ino = inr;
	inode->i_flags = ((unsigned short int) sb->s_flags);
	put_last_free(inode);
	debug("iget: Reading inode\n");
	read_inode(inode);
	debug("iget: Read it\n");
	goto return_it;
found_it:
	/* Taking it off the free pool if it was unreferenced. */
	if (!inode->i_count)
		nr_free_inodes--;
	inode->i_count++;
	wait_on_inode(inode);
	/* The wait may have slept; recheck identity in case it was reused. */
	if (inode->i_dev != sb->s_dev || inode->i_ino != inr) {
		printk("Whee.. inode changed from under us. Tell _.\n");
		iput(inode);
		goto repeat;
	}
	/* Cross a mount point: hand back the mounted-on root instead. */
	if ( /* crossmntp && */ inode->i_mount) {
		struct inode *tmp = inode->i_mount;
		tmp->i_count++;
		iput(inode);
		inode = tmp;
		wait_on_inode(inode);
	}
	/* Release the empty slot we reserved but did not need. */
	if (empty)
		iput(empty);
return_it:
	return inode;
}
/* * Find an unused file structure and return a pointer to it. * Returns NULL, if there are no more free file structures or * we run out of memory. */ struct file * get_empty_filp(void) { int i; int max = max_files; struct file * f; /* * Reserve a few files for the super-user.. */ if (current->euid) max -= 10; /* if the return is taken, we are in deep trouble */ if (!first_file && !grow_files()) return NULL; do { for (f = first_file, i=0; i < nr_files; i++, f = f->f_next) if (!f->f_count) { remove_file_free(f); memset(f,0,sizeof(*f)); put_last_free(f); f->f_count = 1; f->f_version = ++event; return f; } } while (nr_files < max && grow_files()); return NULL; }
/* * Try to free up some pages by shrinking the buffer-cache * * Priority tells the routine how hard to try to shrink the * buffers: 3 means "don't bother too much", while a value * of 0 means "we'd better get some free pages now". */ int shrink_buffers(unsigned int priority) { struct buffer_head *bh; int i; if (priority < 2) sync_buffers(0,0); bh = free_list; i = nr_buffers >> priority; for ( ; i-- > 0 ; bh = bh->b_next_free) { if (bh->b_count || (priority >= 5 && mem_map[MAP_NR((unsigned long) bh->b_data)] > 1)) { put_last_free(bh); continue; } if (!bh->b_this_page) continue; if (bh->b_lock) if (priority) continue; else wait_on_buffer(bh); if (bh->b_dirt) { bh->b_count++; ll_rw_block(WRITEA, 1, &bh); bh->b_count--; continue; } if (try_to_free(bh, &bh)) return 1; } return 0; }
/*
 * Why like this, I hear you say... The reason is race-conditions.
 * As we don't lock buffers (unless we are reading them, that is),
 * something might happen to it while we sleep (ie a read-error
 * will force it bad). This shouldn't really happen currently, but
 * the code is ready.
 */
struct buffer_head * get_hash_table(int dev, int block, int size)
{
	struct buffer_head * bh;

	while ((bh = find_buffer(dev, block, size)) != NULL) {
		bh->b_count++;
		wait_on_buffer(bh);
		/* The wait may have slept; drop and retry if the buffer
		 * was repurposed underneath us. */
		if (bh->b_dev != dev || bh->b_blocknr != block ||
		    bh->b_size != size) {
			bh->b_count--;
			continue;
		}
		put_last_free(bh);
		return bh;
	}
	return NULL;
}
/*
 * Hand out an unused file structure, or NULL if none can be found
 * and the file table is already at its NR_FILE ceiling.
 */
struct file * get_empty_filp(void)
{
	struct file * f;
	int n;

	if (!first_file)
		grow_files();
	for (;;) {
		/* Scan the file list for an unreferenced slot. */
		f = first_file;
		for (n = 0; n < nr_files; n++, f = f->f_next) {
			if (f->f_count)
				continue;
			remove_file_free(f);
			memset(f, 0, sizeof(*f));
			put_last_free(f);
			f->f_count = 1;
			return f;
		}
		/* Table full and at the hard limit: give up. */
		if (nr_files >= NR_FILE)
			return NULL;
		grow_files();
	}
}
/*
 * getblk - get a (possibly new) buffer for (dev, block) of the given size.
 *
 * First tries the hash table; on a miss it may grow the buffer cache
 * (throttled by the static grow_size counter), then picks the least
 * "bad" free buffer via BADNESS() and recycles it.  Because several
 * of the steps can sleep, every state is rechecked afterwards and the
 * whole routine restarts via "repeat" on any race.
 *
 * Returns the buffer with b_count raised; on a recycled buffer
 * b_uptodate is 0, so the caller must read it in itself.
 */
struct buffer_head * getblk(dev_t dev, int block, int size)
{
	struct buffer_head * bh, * tmp;
	int buffers;
	/* Bytes handed out since the last cache growth; limits how often
	 * we try to grow the cache. */
	static int grow_size = 0;

repeat:
	bh = get_hash_table(dev, block, size);
	if (bh) {
		/* Clean, valid buffers go to the back of the free list. */
		if (bh->b_uptodate && !bh->b_dirt)
			put_last_free(bh);
		return bh;
	}
	grow_size -= size;
	if (nr_free_pages > min_free_pages && grow_size <= 0) {
		if (grow_buffers(GFP_BUFFER, size))
			grow_size = PAGE_SIZE;
	}
	buffers = nr_buffers;
	bh = NULL;
	/* Pick the best (lowest BADNESS) reusable buffer of the right size
	 * that sits alone on its page. */
	for (tmp = free_list; buffers-- > 0 ; tmp = tmp->b_next_free) {
		if (tmp->b_count || tmp->b_size != size)
			continue;
		if (mem_map[MAP_NR((unsigned long) tmp->b_data)] != 1)
			continue;
		if (!bh || BADNESS(tmp)<BADNESS(bh)) {
			bh = tmp;
			/* BADNESS 0 is as good as it gets: stop looking. */
			if (!BADNESS(tmp))
				break;
		}
#if 0
		if (tmp->b_dirt) {
			tmp->b_count++;
			ll_rw_block(WRITEA, 1, &tmp);
			tmp->b_count--;
		}
#endif
	}
	if (!bh) {
		/* No candidate: try to grow the cache, sleeping on
		 * buffer_wait if even an atomic grow fails. */
		if (nr_free_pages > 5)
			if (grow_buffers(GFP_BUFFER, size))
				goto repeat;
		if (!grow_buffers(GFP_ATOMIC, size))
			sleep_on(&buffer_wait);
		goto repeat;
	}
	wait_on_buffer(bh);
	/* The wait may have slept; restart if the buffer got taken. */
	if (bh->b_count || bh->b_size != size)
		goto repeat;
	if (bh->b_dirt) {
		sync_buffers(0,0);
		goto repeat;
	}
/* NOTE!! While we slept waiting for this block, somebody else might */
/* already have added "this" block to the cache. check it */
	if (find_buffer(dev,block,size))
		goto repeat;
/* OK, FINALLY we know that this buffer is the only one of its kind, */
/* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
	bh->b_count=1;
	bh->b_dirt=0;
	bh->b_uptodate=0;
	bh->b_req=0;
	remove_from_queues(bh);
	bh->b_dev=dev;
	bh->b_blocknr=block;
	insert_into_queues(bh);
	return bh;
}