Example #1
asmlinkage int sys_idle(void)
{
	unsigned long start_idle = 0;

	if (current->pid != 0)
		return -EPERM;

	/* endless idle loop with no priority at all */
	current->priority = 0;
	current->counter = 0;
	for (;;) {
		/*
		 * R4[36]00 have wait, R4[04]00 don't.
		 * FIXME: We should save power by reducing the clock where
		 *        possible.  This will cut down the power consumption
		 *        of R4200 systems to about 1/16th of normal, the
		 *        same for logic clocked with the processor generated
		 *        clocks.
		 */
		if (!start_idle) {
			check_pgt_cache();
			start_idle = jiffies;
		}
		if (wait_available && !current->need_resched)
			__asm__(".set\tmips3\n\t"
				"wait\n\t"
				".set\tmips0");
		run_task_queue(&tq_scheduler);
		if (current->need_resched)
			start_idle = 0;
		schedule();
	}

	return 0;
}
Example #2
static int context_thread(void *startup)
{
	struct task_struct *curtask = current;
	DECLARE_WAITQUEUE(wait, curtask);
	struct k_sigaction sa;

	daemonize();
	strcpy(curtask->comm, "keventd");
	current->flags |= PF_IOTHREAD;
	keventd_running = 1;
	keventd_task = curtask;

	spin_lock_irq(&curtask->sigmask_lock);
	siginitsetinv(&curtask->blocked, sigmask(SIGCHLD));
	recalc_sigpending(curtask);
	spin_unlock_irq(&curtask->sigmask_lock);

	complete((struct completion *)startup);

	/* Install a handler so SIGCHLD is delivered */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	/*
	 * If one of the functions on a task queue re-adds itself
	 * to the task queue we call schedule() in state TASK_RUNNING
	 */
	for (;;) {
		set_task_state(curtask, TASK_INTERRUPTIBLE);
		add_wait_queue(&context_task_wq, &wait);
		if (TQ_ACTIVE(tq_context))
			set_task_state(curtask, TASK_RUNNING);
		schedule();
		remove_wait_queue(&context_task_wq, &wait);
		run_task_queue(&tq_context);
		wake_up(&context_task_done);
		if (signal_pending(curtask)) {
			while (waitpid(-1, (unsigned int *)0, __WALL|WNOHANG) > 0)
				;
			spin_lock_irq(&curtask->sigmask_lock);
			flush_signals(curtask);
			recalc_sigpending(curtask);
			spin_unlock_irq(&curtask->sigmask_lock);
		}
	}
}
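If a function run from tq_context re-queues itself, the queue is non-empty again by the time the loop gets back to the top; the TQ_ACTIVE() check above is what keeps keventd from going to sleep in that case. A minimal sketch of such a self-re-adding task, using the 2.4-era schedule_task() interface (the names my_task and my_routine are hypothetical):

static void my_routine(void *data);

static struct tq_struct my_task = {
	routine:	my_routine,
	data:		NULL,
};

static void my_routine(void *data)
{
	/* ... deferred work ... */

	/* Re-queue ourselves; schedule_task() adds us to tq_context
	 * and wakes context_task_wq, so context_thread() sees
	 * TQ_ACTIVE(tq_context) and stays in TASK_RUNNING. */
	schedule_task(&my_task);
}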
Example #3
static void __flush_batch(struct buffer_head **bhs, int *batch_count)
{
	int i;

	spin_unlock(&journal_datalist_lock);
	ll_rw_block(WRITE, *batch_count, bhs);
	run_task_queue(&tq_disk);
	spin_lock(&journal_datalist_lock);
	for (i = 0; i < *batch_count; i++) {
		struct buffer_head *bh = bhs[i];
		clear_bit(BH_JWrite, &bh->b_state);
		BUFFER_TRACE(bh, "brelse");
		__brelse(bh);
	}
	*batch_count = 0;
}
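Callers of __flush_batch() accumulate buffers under journal_datalist_lock and flush once the batch is full, roughly as in this hypothetical sketch (NR_BATCH and checkpoint_one_bh_sketch are made-up names; the caller must hold an extra b_count reference on each buffer, since __flush_batch() drops one via __brelse()):

#define NR_BATCH 64

/* called with journal_datalist_lock held */
static void checkpoint_one_bh_sketch(struct buffer_head *bh,
				     struct buffer_head **bhs,
				     int *batch_count)
{
	atomic_inc(&bh->b_count);		/* reference for the batch */
	set_bit(BH_JWrite, &bh->b_state);	/* cleared by __flush_batch() */
	bhs[*batch_count] = bh;
	if (++*batch_count == NR_BATCH)
		__flush_batch(bhs, batch_count);	/* drops and retakes the lock */
}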
Example #4
/*
 * The background pageout daemon, started as a kernel thread
 * from the init process. 
 *
 * This basically trickles out pages so that we have _some_
 * free memory available even if there is no other activity
 * that frees anything up. This is needed for things like routing
 * etc, where we otherwise might have all activity going on in
 * asynchronous contexts that cannot page things out.
 *
 * If there are applications that are active memory-allocators
 * (most normal use), this basically shouldn't matter.
 */
int kswapd(void *unused)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	daemonize();
	strcpy(tsk->comm, "kswapd");
	sigfillset(&tsk->blocked);
	
	/*
	 * Tell the memory management that we're a "memory allocator",
	 * and that if we need more memory we should get access to it
	 * regardless (see "__alloc_pages()"). "kswapd" should
	 * never get caught in the normal page freeing logic.
	 *
	 * (Kswapd normally doesn't need memory anyway, but sometimes
	 * you need a small amount of memory in order to be able to
	 * page out something else, and this flag essentially protects
	 * us from recursively trying to free more memory as we're
	 * trying to free the first piece of memory in the first place).
	 */
	tsk->flags |= PF_MEMALLOC;

	/*
	 * Kswapd main loop.
	 */
	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&kswapd_wait, &wait);

		mb();
		if (kswapd_can_sleep())
			schedule();

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&kswapd_wait, &wait);

		/*
		 * If we actually get into a low-memory situation,
		 * the processes needing more memory will wake us
		 * up on a more timely basis.
		 */
		kswapd_balance();
		run_task_queue(&tq_disk);
	}
}
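PF_MEMALLOC only has an effect on the allocator side: __alloc_pages() checks it before falling back into page reclaim, so kswapd can never recurse into itself. A simplified, hypothetical sketch of that check follows; it is not the real __alloc_pages(), and every helper named here is made up:

struct page *alloc_pages_sketch(unsigned int gfp_mask, unsigned int order)
{
	struct page *page;

	page = grab_free_pages(order);		/* hypothetical fast path */
	if (page)
		return page;

	/* A PF_MEMALLOC task (kswapd) dips into the reserves instead
	 * of recursing into reclaim while it is itself reclaiming. */
	if (current->flags & PF_MEMALLOC)
		return grab_reserve_pages(order);	/* hypothetical */

	/* Everyone else synchronously frees some memory and retries. */
	free_some_memory(gfp_mask);			/* hypothetical */
	return grab_free_pages(order);
}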
Example #5
/* When we allocate a new block (get_new_buffer, get_empty_nodes) and
   get a buffer for it, it is possible that it is held by someone else
   or even by this process. In this function we wait until all other
   holders release the buffer. To make sure the current process does
   not hold it, we free all buffers in the tree balance structure
   (get_empty_nodes and get_nodes_for_preserving) or in the path
   structure only (get_new_buffer) just before calling this. */
void wait_buffer_until_released (const struct buffer_head * bh)
{
  int repeat_counter = 0;

  while (atomic_read (&(bh->b_count)) > 1) {

    if ( !(++repeat_counter % 30000000) ) {
      reiserfs_warning (NULL, "vs-3050: wait_buffer_until_released: nobody releases buffer (%b). Still waiting (%d) %cJDIRTY %cJWAIT\n",
			bh, repeat_counter, buffer_journaled(bh) ? ' ' : '!',
			buffer_journal_dirty(bh) ? ' ' : '!');
    }
    run_task_queue(&tq_disk);
    yield();
  }
  if (repeat_counter > 30000000) {
    reiserfs_warning(NULL, "vs-3051: done waiting, ignore vs-3050 messages for (%b)\n", bh) ;
  }
}
Example #6
/*
 * This routine is used to handle the "bottom half" processing for the
 * serial driver, also known as the "software interrupt" processing.
 * This processing is done at the kernel interrupt level, after
 * xmbrs_interrupt() has returned, BUT WITH INTERRUPTS TURNED ON.  This
 * is where time-consuming activities which cannot be done in the
 * interrupt driver proper are done; the interrupt driver schedules
 * them using xmbrs_sched_event(), and they get done here.
 */
static void do_serial_bh(void)
{
	run_task_queue(&xmb_tq_serial);
}
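The producer side the comment refers to conventionally looks like the sketch below: the interrupt handler records what happened, queues the port's tq_struct on xmb_tq_serial, and marks the bottom half so do_serial_bh() runs on the next softirq pass. The xmb_serial structure and its event/tqueue fields are assumptions modelled on the stock serial driver; do_serial_bh() itself would be registered once with init_bh(SERIAL_BH, do_serial_bh) at driver init.

static void xmbrs_sched_event(struct xmb_serial *info, int event)
{
	info->event |= 1 << event;		   /* remember what happened */
	queue_task(&info->tqueue, &xmb_tq_serial); /* defer the slow work */
	mark_bh(SERIAL_BH);			   /* schedule do_serial_bh() */
}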
Example #7
/*
 * Reads or writes a swap page.
 * wait=1: start I/O and wait for completion. wait=0: start asynchronous I/O.
 *
 * Important prevention of race condition: The first thing we do is set a lock
 * on this swap page, which lasts until I/O completes. This way a
 * write_swap_page(entry) immediately followed by a read_swap_page(entry)
 * on the same entry will first complete the write_swap_page(). Fortunately,
 * no more than one write_swap_page() request can be pending per entry. So
 * the only race the caller must catch is multiple read_swap_page() requests
 * on the same entry.
 */
void rw_swap_page(int rw, unsigned long entry, char * buf, int wait)
{
	unsigned long type, offset;
	struct swap_info_struct * p;
	struct page *page;
	
	type = SWP_TYPE(entry);
	if (type >= nr_swapfiles) {
		printk("Internal error: bad swap-device\n");
		return;
	}
	p = &swap_info[type];
	offset = SWP_OFFSET(entry);
	if (offset >= p->max) {
		printk("rw_swap_page: weirdness\n");
		return;
	}
	if (p->swap_map && !p->swap_map[offset]) {
		printk("Hmm.. Trying to use unallocated swap (%08lx)\n", entry);
		return;
	}
	if (!(p->flags & SWP_USED)) {
		printk("Trying to swap to unused swap-device\n");
		return;
	}
	/* Make sure we are the only process doing I/O with this swap page. */
	while (set_bit(offset,p->swap_lockmap)) {
		run_task_queue(&tq_disk);
		sleep_on(&lock_queue);
	}
	if (rw == READ)
		kstat.pswpin++;
	else
		kstat.pswpout++;
	page = mem_map + MAP_NR(buf);
	atomic_inc(&page->count);
	wait_on_page(page);
	if (p->swap_device) {
		if (!wait) {
			set_bit(PG_free_after, &page->flags);
			set_bit(PG_decr_after, &page->flags);
			set_bit(PG_swap_unlock_after, &page->flags);
			page->swap_unlock_entry = entry;
			atomic_inc(&nr_async_pages);
		}
		ll_rw_page(rw,p->swap_device,offset,buf);
		/*
		 * NOTE! We don't decrement the page count if we
		 * don't wait - that will happen asynchronously
		 * when the IO completes.
		 */
		if (!wait)
			return;
		wait_on_page(page);
	} else if (p->swap_file) {
		struct inode *swapf = p->swap_file;
		unsigned int zones[PAGE_SIZE/512];
		int i;
		if (swapf->i_op->bmap == NULL
			&& swapf->i_op->smap != NULL){
			/*
				With MS-DOS, we use msdos_smap which returns
				a sector number (not a cluster or block number).
				It is a patch to enable the UMSDOS project.
				Other people are working on a better solution.

				It sounds like ll_rw_swap_file defined
				its operation size (sector size) based on
				PAGE_SIZE and the number of blocks to read.
				So using bmap or smap should work even if
				smap will require more blocks.
			*/
			int j;
			unsigned int block = offset << 3;

			for (i=0, j=0; j< PAGE_SIZE ; i++, j += 512){
				if (!(zones[i] = swapf->i_op->smap(swapf,block++))) {
					printk("rw_swap_page: bad swap file\n");
					return;
				}
			}
		} else {
			int j;
			unsigned int block = offset
				<< (PAGE_SHIFT - swapf->i_sb->s_blocksize_bits);

			for (i=0, j=0; j< PAGE_SIZE ; i++, j +=swapf->i_sb->s_blocksize)
				if (!(zones[i] = bmap(swapf,block++))) {
					printk("rw_swap_page: bad swap file\n");
				}
		}
		ll_rw_swap_file(rw,swapf->i_dev, zones, i,buf);
	} else
		printk("rw_swap_page: no swap file or device\n");
	atomic_dec(&page->count);
	if (offset && !clear_bit(offset,p->swap_lockmap))
		printk("rw_swap_page: lock already cleared\n");
	wake_up(&lock_queue);
}
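SWP_TYPE() and SWP_OFFSET() unpack the two halves of the swap entry word. On i386 in this era they were shift-and-mask macros roughly like the sketch below (the exact layout is architecture-specific; bit 0 stays clear so a swap entry can never look like a present pte):

#define SWP_TYPE(entry)		(((entry) >> 1) & 0x3f)	/* swap device index */
#define SWP_OFFSET(entry)	((entry) >> 8)		/* page slot on that device */
#define SWP_ENTRY(type,offset)	(((type) << 1) | ((offset) << 8))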
Example #8
/*
 * not really used in our situation so keep them commented out for now
 */
static DECLARE_TASK_QUEUE(tq_serial); /* used to be at the top of the file */
static void do_serial_bh(void)
{
    run_task_queue(&tq_serial);
    printk(KERN_ERR "do_serial_bh: called\n");
}
Example #9
/*
 * The background pageout daemon, started as a kernel thread
 * from the init process. 
 *
 * This basically trickles out pages so that we have _some_
 * free memory available even if there is no other activity
 * that frees anything up. This is needed for things like routing
 * etc, where we otherwise might have all activity going on in
 * asynchronous contexts that cannot page things out.
 *
 * If there are applications that are active memory-allocators
 * (most normal use), this basically shouldn't matter.
 */
int kswapd(void *unused)
{
	struct task_struct *tsk = current;

	tsk->session = 1;
	tsk->pgrp = 1;
	strcpy(tsk->comm, "kswapd");
	sigfillset(&tsk->blocked);
	kswapd_task = tsk;
	
	/*
	 * Tell the memory management that we're a "memory allocator",
	 * and that if we need more memory we should get access to it
	 * regardless (see "__alloc_pages()"). "kswapd" should
	 * never get caught in the normal page freeing logic.
	 *
	 * (Kswapd normally doesn't need memory anyway, but sometimes
	 * you need a small amount of memory in order to be able to
	 * page out something else, and this flag essentially protects
	 * us from recursively trying to free more memory as we're
	 * trying to free the first piece of memory in the first place).
	 */
	tsk->flags |= PF_MEMALLOC;

	/*
	 * Kswapd main loop.
	 */
	for (;;) {
		static int recalc = 0;

		/* If needed, try to free some memory. */
		if (inactive_shortage() || free_shortage()) {
			int wait = 0;
			/* Do we need to do some synchronous flushing? */
			if (waitqueue_active(&kswapd_done))
				wait = 1;
			do_try_to_free_pages(GFP_KSWAPD, wait);
		}

		/*
		 * Do some (very minimal) background scanning. This
		 * will scan all pages on the active list once
		 * every minute. This clears old referenced bits
		 * and moves unused pages to the inactive list.
		 */
		refill_inactive_scan(6, 0);

		/* Once a second, recalculate some VM stats. */
		if (time_after(jiffies, recalc + HZ)) {
			recalc = jiffies;
			recalculate_vm_stats();
		}

		/*
		 * Wake up everybody waiting for free memory
		 * and unplug the disk queue.
		 */
		wake_up_all(&kswapd_done);
		run_task_queue(&tq_disk);

		/* 
		 * We go to sleep if either the free page shortage
		 * or the inactive page shortage is gone. We do this
		 * because:
		 * 1) we need no more free pages   or
		 * 2) the inactive pages need to be flushed to disk,
		 *    it wouldn't help to eat CPU time now ...
		 *
		 * We go to sleep for one second, but if it's needed
		 * we'll be woken up earlier...
		 */
		if (!free_shortage() || !inactive_shortage()) {
			interruptible_sleep_on_timeout(&kswapd_wait, HZ);
		/*
		 * If we couldn't free enough memory, we see if it was
		 * due to the system just not having enough memory.
		 * If that is the case, the only solution is to kill
		 * a process (the alternative is eternal deadlock).
		 *
		 * If there still is enough memory around, we just loop
		 * and try free some more memory...
		 */
		} else if (out_of_memory()) {
			oom_kill();
		}
	}
}
Example #10
/* immediate bh added */
void
immediate_bh(void)
{
	run_task_queue(&tq_immediate);
}
Example #11
void
tqueue_bh(void)
{
	run_task_queue(&tq_timer);
}
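Both handlers only drain their queue; work arrives from the producer side, which queues a tq_struct and, for tq_immediate, marks the matching bottom half (the timer code marks TQUEUE_BH by itself on each tick when tq_timer is non-empty). A sketch with hypothetical names my_task, my_handler and my_irq_handler:

static void my_handler(void *data)
{
	/* deferred work, run by immediate_bh() outside the hard irq */
}

static struct tq_struct my_task = {
	routine:	my_handler,
	data:		NULL,
};

static void my_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	/* ... acknowledge the hardware ... */
	queue_task(&my_task, &tq_immediate);
	mark_bh(IMMEDIATE_BH);		/* immediate_bh() will run soon */
}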
Example #12
static void rw_swap_page_base(int rw, unsigned long entry, struct page *page, int wait)
{
	unsigned long type, offset;
	struct swap_info_struct * p;
	int zones[PAGE_SIZE/512];
	int zones_used;
	kdev_t dev = 0;
	int block_size;

#ifdef DEBUG_SWAP
	printk ("DebugVM: %s_swap_page entry %08lx, page %p (count %d), %s\n",
		(rw == READ) ? "read" : "write", 
		entry, (char *) page_address(page), atomic_read(&page->count),
		wait ? "wait" : "nowait");
#endif

	type = SWP_TYPE(entry);
	if (type >= nr_swapfiles) {
		printk("Internal error: bad swap-device\n");
		return;
	}

	/* Don't allow too many pending pages in flight.. */
	if (atomic_read(&nr_async_pages) > pager_daemon.swap_cluster)
		wait = 1;

	p = &swap_info[type];
	offset = SWP_OFFSET(entry);
	if (offset >= p->max) {
		printk("rw_swap_page: weirdness\n");
		return;
	}
	if (p->swap_map && !p->swap_map[offset]) {
		printk(KERN_ERR "rw_swap_page: "
			"Trying to %s unallocated swap (%08lx)\n", 
			(rw == READ) ? "read" : "write", entry);
		return;
	}
	if (!(p->flags & SWP_USED)) {
		printk(KERN_ERR "rw_swap_page: "
			"Trying to swap to unused swap-device\n");
		return;
	}

	if (!PageLocked(page)) {
		printk(KERN_ERR "VM: swap page is unlocked\n");
		return;
	}

	if (PageSwapCache(page)) {
		/* Make sure we are the only process doing I/O with this swap page. */
		if (test_and_set_bit(offset, p->swap_lockmap))
		{
			struct wait_queue __wait;
			
			__wait.task = current;
			add_wait_queue(&lock_queue, &__wait);
			for (;;) {
				current->state = TASK_UNINTERRUPTIBLE;
				mb();
				if (!test_and_set_bit(offset, p->swap_lockmap))
					break;
				run_task_queue(&tq_disk);
				schedule();
			}
			current->state = TASK_RUNNING;
			remove_wait_queue(&lock_queue, &__wait);
		}

		/* 
		 * Make sure that we have a swap cache association for this
		 * page.  We need this to find which swap page to unlock once
		 * the swap IO has completed to the physical page.  If the page
		 * is not already in the cache, just overload the offset entry
		 * as if it were: we are not allowed to manipulate the inode
		 * hashing for locked pages.
		 */
		if (page->offset != entry) {
			printk ("swap entry mismatch");
			return;
		}
	}
	if (rw == READ) {
		clear_bit(PG_uptodate, &page->flags);
		kstat.pswpin++;
	} else
		kstat.pswpout++;

	atomic_inc(&page->count);
	if (p->swap_device) {
		zones[0] = offset;
		zones_used = 1;
		dev = p->swap_device;
		block_size = PAGE_SIZE;
	} else if (p->swap_file) {
		struct inode *swapf = p->swap_file->d_inode;
		int i;
		if (swapf->i_op->bmap == NULL
			&& swapf->i_op->smap != NULL){
			/*
				With MS-DOS, we use msdos_smap which returns
				a sector number (not a cluster or block number).
				It is a patch to enable the UMSDOS project.
				Other people are working on a better solution.

				It sounds like ll_rw_swap_file defined
				its operation size (sector size) based on
				PAGE_SIZE and the number of blocks to read.
				So using bmap or smap should work even if
				smap will require more blocks.
			*/
			int j;
			unsigned int block = offset << 3;

			for (i=0, j=0; j< PAGE_SIZE ; i++, j += 512){
				if (!(zones[i] = swapf->i_op->smap(swapf,block++))) {
					printk("rw_swap_page: bad swap file\n");
					return;
				}
			}
			block_size = 512;
		} else {
			int j;
			unsigned int block = offset
				<< (PAGE_SHIFT - swapf->i_sb->s_blocksize_bits);

			block_size = swapf->i_sb->s_blocksize;
			for (i=0, j=0; j< PAGE_SIZE ; i++, j += block_size)
				if (!(zones[i] = bmap(swapf,block++))) {
					printk("rw_swap_page: bad swap file\n");
					return;
				}
		}
		zones_used = i;
		dev = swapf->i_dev;
	} else {
		printk(KERN_ERR "rw_swap_page: no swap file or device\n");
		/* Do some cleaning up so if this ever happens we can hopefully
		 * trigger controlled shutdown.
		 */
		if (PageSwapCache(page)) {
			if (!test_and_clear_bit(offset,p->swap_lockmap))
				printk("swap_after_unlock_page: lock already cleared\n");
			wake_up(&lock_queue);
		}
		atomic_dec(&page->count);
		return;
	}
 	if (!wait) {
 		set_bit(PG_decr_after, &page->flags);
 		atomic_inc(&nr_async_pages);
 	}
 	if (PageSwapCache(page)) {
 		/* only lock/unlock swap cache pages! */
 		set_bit(PG_swap_unlock_after, &page->flags);
 	}
 	set_bit(PG_free_after, &page->flags);

 	/* block_size == PAGE_SIZE/zones_used */
 	brw_page(rw, page, dev, zones, block_size, 0);
 
 	/* Note! For consistency we do all of the logic,
 	 * decrementing the page count, and unlocking the page in the
 	 * swap lock map - in the IO completion handler.
 	 */
 	if (!wait) 
 		return;
 	wait_on_page(page);
	/* This shouldn't happen, but check to be sure. */
	if (atomic_read(&page->count) == 0)
		printk(KERN_ERR "rw_swap_page: page unused while waiting!\n");

#ifdef DEBUG_SWAP
	printk ("DebugVM: %s_swap_page finished on page %p (count %d)\n",
		(rw == READ) ? "read" : "write", 
		(char *) page_address(page),
		atomic_read(&page->count));
#endif
}
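The completion-handler logic the comment defers to is not shown in this example. Reconstructed from the flags set above and from the "swap_after_unlock_page: lock already cleared" message in the error path, the unlock half plausibly looks like this sketch:

/* Sketch: run at I/O completion when PG_swap_unlock_after was set. */
static void swap_unlock_after_sketch(unsigned long entry)
{
	struct swap_info_struct *p = &swap_info[SWP_TYPE(entry)];
	unsigned long offset = SWP_OFFSET(entry);

	if (!test_and_clear_bit(offset, p->swap_lockmap))
		printk("swap_after_unlock_page: lock already cleared\n");
	wake_up(&lock_queue);	/* waiters re-check the lockmap in rw_swap_page_base() */
}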
Example #13
/*
 * This routine is used to handle the "bottom half" processing for the
 * serial driver, also known as the "software interrupt" processing.
 * This processing is done at the kernel interrupt level, after
 * rs_interrupt() has returned, BUT WITH INTERRUPTS TURNED ON.  This
 * is where time-consuming activities which cannot be done in the
 * interrupt driver proper are done; the interrupt driver schedules
 * them using rs_sched_event(), and they get done here.
 */
static void do_serial_bh(void)
{
	run_task_queue(&tq_s3c3410_serial);
}