Example #1
static int kauditd_thread(void *dummy)
{
	struct sk_buff *skb;

	while (1) {
		skb = skb_dequeue(&audit_skb_queue);
		wake_up(&audit_backlog_wait);
		if (skb) {
			if (audit_pid) {
				int err = netlink_unicast(audit_sock, skb, audit_pid, 0);
				if (err < 0) {
					BUG_ON(err != -ECONNREFUSED); /* Shouldn't happen */
					printk(KERN_ERR "audit: *NO* daemon at audit_pid=%d\n", audit_pid);
					audit_pid = 0;
				}
			} else {
				printk(KERN_NOTICE "%s\n", skb->data + NLMSG_SPACE(0));
				kfree_skb(skb);
			}
		} else {
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_INTERRUPTIBLE);
			add_wait_queue(&kauditd_wait, &wait);

			if (!skb_queue_len(&audit_skb_queue)) {
				try_to_freeze();
				schedule();
			}

			__set_current_state(TASK_RUNNING);
			remove_wait_queue(&kauditd_wait, &wait);
		}
	}

	return 0; /* shut up gcc */
}
Example #2
/**
 * finish_wait - clean up after waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPU's that we haven't seen yet (and that might
	 *    still change the stack area).
	 * and
	 *  - all other users take the lock (ie we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	if (!list_empty_careful(&wait->task_list)) {
		spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		spin_unlock_irqrestore(&q->lock, flags);
	}
}
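For context, finish_wait() is normally paired with prepare_to_wait(); below is a minimal hedged sketch of that conventional loop. The names my_wq and my_condition() are hypothetical placeholders, not part of the example above, and the sketch assumes <linux/wait.h> and <linux/sched.h>.

/* Sketch only: the usual prepare_to_wait()/finish_wait() pairing. */
extern bool my_condition(void);	/* hypothetical predicate */

static void wait_for_my_condition(wait_queue_head_t *my_wq)
{
	DEFINE_WAIT(wait);

	for (;;) {
		/* queue @wait on my_wq and mark this task as sleeping */
		prepare_to_wait(my_wq, &wait, TASK_INTERRUPTIBLE);
		if (my_condition() || signal_pending(current))
			break;
		schedule();	/* sleep until woken or signalled */
	}
	/* back to TASK_RUNNING and dequeued, which is what finish_wait() above does */
	finish_wait(my_wq, &wait);
}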
Example #3
int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}
Example #4
static int efx_spi_slow_wait(struct efx_mtd *efx_mtd, bool uninterruptible)
{
	const struct efx_spi_device *spi = efx_mtd->spi;
	struct efx_nic *efx = efx_mtd->efx;
	u8 status;
	int rc, i;

	/* Wait up to 4s for flash/EEPROM to finish a slow operation. */
	for (i = 0; i < 40; i++) {
		__set_current_state(uninterruptible ?
				    TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
		schedule_timeout(HZ / 10);
		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;
		if (signal_pending(current))
			return -EINTR;
	}
	EFX_ERR(efx, "timed out waiting for %s\n", efx_mtd->name);
	return -ETIMEDOUT;
}
Example #5
static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
	struct restart_block *restart;

	hrtimer_init_sleeper(t, current);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start_expires(&t->timer, mode);

		if (likely(t->task))
			freezable_schedule();

		hrtimer_cancel(&t->timer);
		mode = HRTIMER_MODE_ABS;

	} while (t->task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);

	if (!t->task)
		return 0;

	restart = &current->restart_block;
	if (restart->nanosleep.type != TT_NONE) {
		ktime_t rem = hrtimer_expires_remaining(&t->timer);
		struct timespec64 rmt;

		if (rem <= 0)
			return 0;
		rmt = ktime_to_timespec64(rem);

		return nanosleep_copyout(restart, &rmt);
	}
	return -ERESTART_RESTARTBLOCK;
}
Example #6
static int devtmpfsd(void *p)
{
	char options[] = "mode=0755";
	int *err = p;
	*err = sys_unshare(CLONE_NEWNS);
	if (*err)
		goto out;
	*err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
	if (*err)
		goto out;
	sys_chdir("/.."); /* will traverse into overmounted root */
	sys_chroot(".");
	complete(&setup_done);
	while (1) {
		spin_lock(&req_lock);
		while (requests) {
			struct req *req = requests;
			requests = NULL;
			spin_unlock(&req_lock);
			while (req) {
				struct req *next = req->next;
				req->err = handle(req->name, req->mode, req->dev);
				complete(&req->done);
				req = next;
			}
			spin_lock(&req_lock);
		}
		__set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock(&req_lock);
		schedule();
	}
	return 0;
out:
	complete(&setup_done);
	return *err;
}
Example #7
int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_user_nice(tsk, KTHREAD_NICE_LEVEL);
	set_cpus_allowed_ptr(tsk, CPU_MASK_ALL_PTR);

	current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}
Example #8
// ARM10C 20170830
// x: &kthreadd_done, action: schedule_timeout, timeout: 0x7FFFFFFF, state: 2
static inline long __sched
do_wait_for_common(struct completion *x,
		   long (*action)(long), long timeout, int state)
{
	// x->done: (&kthreadd_done)->done: 0
	if (!x->done) {
		// current: kmem_cache#15-oX (struct task_struct) (pid: 1)
		DECLARE_WAITQUEUE(wait, current);
		// DECLARE_WAITQUEUE(wait, kmem_cache#15-oX (struct task_struct) (pid: 1)):
		// wait_queue_t wait =
		// {
		//     .private	= kmem_cache#15-oX (struct task_struct) (pid: 1),
		//     .func = default_wake_function,
		//     .task_list = { NULL, NULL }
		// }

		// &x->wait: &(&kthreadd_done)->wait
		__add_wait_queue_tail_exclusive(&x->wait, &wait);

		// What __add_wait_queue_tail_exclusive did:
		// (&wait)->flags: ? | 0x01
		// inserted &(&wait)->task_list between &(&(&kthreadd_done)->wait)->task_list and (&(&(&kthreadd_done)->wait)->task_list)->prev
		// in short: appended &(&wait)->task_list at the tail of the list head &(&(&kthreadd_done)->wait)->task_list
		//
		// (&(&(&kthreadd_done)->wait)->task_list)->prev = &(&wait)->task_list;
		// (&(&wait)->task_list)->next = &(&(&kthreadd_done)->wait)->task_list;
		// (&(&wait)->task_list)->prev = &(&(&kthreadd_done)->wait)->task_list;
		// (&(&(&kthreadd_done)->wait)->task_list)->next = &(&wait)->task_list;
// 2017/08/30 stopped here
// 2017/09/06 resumed here

		do {
			// state: 2, current: kmem_cache#15-oX (struct task_struct) (pid: 1)
			// signal_pending_state(2, kmem_cache#15-oX (struct task_struct) (pid: 1)): 0
			if (signal_pending_state(state, current)) {
				timeout = -ERESTARTSYS;
				break;
			}

			// state: 2
			__set_current_state(state);

			// What __set_current_state did:
			// (kmem_cache#15-oX (struct task_struct) (pid: 1))->state: 2

			// &x->wait.lock: &(&kthreadd_done)->wait.lock
			spin_unlock_irq(&x->wait.lock);

			// What spin_unlock_irq did:
			// performed a spin unlock using &(&kthreadd_done)->wait.lock

			// action: schedule_timeout, timeout: 0x7FFFFFFF
			timeout = action(timeout);
			spin_lock_irq(&x->wait.lock);
		} while (!x->done && timeout);
		__remove_wait_queue(&x->wait, &wait);
		if (!x->done)
			return timeout;
	}
	x->done--;
	return timeout ?: 1;
}
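The annotations above trace do_wait_for_common() from inside the completion core. For orientation, here is a minimal caller-side sketch of the same pattern; my_done, my_worker() and start_and_wait() are hypothetical names, while DECLARE_COMPLETION(), complete(), wait_for_completion() and kthread_run() are the real API (assumes <linux/completion.h> and <linux/kthread.h>).

/* Sketch only: caller-side view of the completion wait traced above. */
static DECLARE_COMPLETION(my_done);

static int my_worker(void *unused)
{
	/* ... do the work the waiter is blocked on ... */
	complete(&my_done);	/* increments ->done and wakes one exclusive waiter */
	return 0;
}

static void start_and_wait(void)
{
	kthread_run(my_worker, NULL, "my_worker");
	/* blocks in do_wait_for_common() with state TASK_UNINTERRUPTIBLE (2) */
	wait_for_completion(&my_done);
}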
Example #9
/**
 * n_hdlc_tty_read - Called to retrieve one frame of data (if available)
 * @tty - pointer to tty instance data
 * @file - pointer to open file object
 * @buf - pointer to returned data buffer
 * @nr - size of returned data buffer
 * 	
 * Returns the number of bytes returned or error code.
 */
static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
			   __u8 __user *buf, size_t nr)
{
	struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
	int ret = 0;
	struct n_hdlc_buf *rbuf;
	DECLARE_WAITQUEUE(wait, current);

	if (debuglevel >= DEBUG_LEVEL_INFO)	
		printk("%s(%d)n_hdlc_tty_read() called\n",__FILE__,__LINE__);
		
	/* Validate the pointers */
	if (!n_hdlc)
		return -EIO;

	/* verify user access to buffer */
	if (!access_ok(VERIFY_WRITE, buf, nr)) {
		printk(KERN_WARNING "%s(%d) n_hdlc_tty_read() can't verify user "
		"buffer\n", __FILE__, __LINE__);
		return -EFAULT;
	}

	add_wait_queue(&tty->read_wait, &wait);

	for (;;) {
		if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
			ret = -EIO;
			break;
		}
		if (tty_hung_up_p(file))
			break;

		set_current_state(TASK_INTERRUPTIBLE);

		rbuf = n_hdlc_buf_get(&n_hdlc->rx_buf_list);
		if (rbuf) {
			if (rbuf->count > nr) {
				/* too large for caller's buffer */
				ret = -EOVERFLOW;
			} else {
				if (copy_to_user(buf, rbuf->buf, rbuf->count))
					ret = -EFAULT;
				else
					ret = rbuf->count;
			}

			if (n_hdlc->rx_free_buf_list.count >
			    DEFAULT_RX_BUF_COUNT)
				kfree(rbuf);
			else
				n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, rbuf);
			break;
		}
			
		/* no data */
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}

		schedule();

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
	}

	remove_wait_queue(&tty->read_wait, &wait);
	__set_current_state(TASK_RUNNING);

	return ret;
	
}	/* end of n_hdlc_tty_read() */
Example #10
/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then the writeback threads are woken to
 * perform some writeout.
 */
static void balance_dirty_pages(struct address_space *mapping,
				unsigned long write_chunk)
{
	long nr_reclaimable, bdi_nr_reclaimable;
	long nr_writeback, bdi_nr_writeback;
	long ub_dirty, ub_writeback;
	long ub_thresh, ub_background_thresh;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long bdi_thresh;
	unsigned long pages_written = 0;
	unsigned long pause = 1;
	struct user_beancounter *ub = get_io_ub();

	struct backing_dev_info *bdi = mapping->backing_dev_info;

	for (;;) {
		struct writeback_control wbc = {
			.sync_mode	= WB_SYNC_NONE,
			.older_than_this = NULL,
			.nr_to_write	= write_chunk,
			.range_cyclic	= 1,
		};

		get_dirty_limits(&background_thresh, &dirty_thresh,
				&bdi_thresh, bdi);

		if (ub_dirty_limits(&ub_background_thresh, &ub_thresh, ub)) {
			ub_dirty = ub_stat_get(ub, dirty_pages);
			ub_writeback = ub_stat_get(ub, writeback_pages);
		} else {
			ub_dirty = ub_writeback = 0;
			ub_thresh = ub_background_thresh = LONG_MAX / 2;
		}

		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
					global_page_state(NR_UNSTABLE_NFS);
		nr_writeback = global_page_state(NR_WRITEBACK);

		bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
		bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);

		/*
		 * Check thresholds, set dirty_exceeded flags and
		 * start background writeback before throttling.
		 */
		if (bdi_nr_reclaimable + bdi_nr_writeback > bdi_thresh) {
			if (!bdi->dirty_exceeded)
				bdi->dirty_exceeded = 1;
			if (!writeback_in_progress(bdi))
				bdi_start_background_writeback(bdi, NULL);
		} else if (ub_dirty + ub_writeback > ub_thresh) {
			if (!test_bit(UB_DIRTY_EXCEEDED, &ub->ub_flags))
				set_bit(UB_DIRTY_EXCEEDED, &ub->ub_flags);
			if (!writeback_in_progress(bdi))
				bdi_start_background_writeback(bdi, ub);
		} else
			break;

		/*
		 * Throttle it only when the background writeback cannot
		 * catch-up. This avoids (excessively) small writeouts
		 * when the bdi limits are ramping up.
		 */
		if (bdi_cap_account_writeback(bdi) &&
		    nr_reclaimable + nr_writeback <
				(background_thresh + dirty_thresh) / 2 &&
		    ub_dirty + ub_writeback <
				(ub_background_thresh + ub_thresh) / 2)
			break;

		/* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
		 * Unstable writes are a feature of certain networked
		 * filesystems (i.e. NFS) in which data may have been
		 * written to the server's write cache, but has not yet
		 * been flushed to permanent storage.
		 * Only move pages to writeback if this bdi is over its
		 * threshold otherwise wait until the disk writes catch
		 * up.
		 */
		trace_wbc_balance_dirty_start(&wbc, bdi);
		if (bdi_nr_reclaimable > bdi_thresh) {
			writeback_inodes_wb(&bdi->wb, &wbc);
			pages_written += write_chunk - wbc.nr_to_write;
			trace_wbc_balance_dirty_written(&wbc, bdi);
			get_dirty_limits(&background_thresh, &dirty_thresh,
				       &bdi_thresh, bdi);
		} else if (ub_dirty > ub_thresh) {
			wbc.wb_ub = ub;
			writeback_inodes_wb(&bdi->wb, &wbc);
			pages_written += write_chunk - wbc.nr_to_write;
			trace_wbc_balance_dirty_written(&wbc, bdi);
			ub_dirty = ub_stat_get(ub, dirty_pages);
			ub_writeback = ub_stat_get(ub, writeback_pages);
			wbc.wb_ub = NULL;
		}

		/*
		 * In order to avoid the stacked BDI deadlock we need
		 * to ensure we accurately count the 'dirty' pages when
		 * the threshold is low.
		 *
		 * Otherwise it would be possible to get thresh+n pages
		 * reported dirty, even though there are thresh-m pages
		 * actually dirty; with m+n sitting in the percpu
		 * deltas.
		 */
		if (bdi_thresh < 2*bdi_stat_error(bdi)) {
			bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
			bdi_nr_writeback = bdi_stat_sum(bdi, BDI_WRITEBACK);
		} else if (bdi_nr_reclaimable) {
			bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
			bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
		}

		/* fixup ub-stat per-cpu drift to avoid false-positive */
		if (ub_dirty + ub_writeback > ub_thresh &&
		    ub_dirty + ub_writeback - ub_thresh <
				    UB_STAT_BATCH * num_possible_cpus()) {
			ub_dirty = ub_stat_get_exact(ub, dirty_pages);
			ub_writeback = ub_stat_get_exact(ub, writeback_pages);
		}

		if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh &&
		    ub_dirty + ub_writeback <= ub_thresh)
			break;

		if (pages_written >= write_chunk)
			break;		/* We've done our duty */

		trace_wbc_balance_dirty_wait(&wbc, bdi);
		__set_current_state(TASK_KILLABLE);
		io_schedule_timeout(pause);

		/*
		 * Increase the delay for each loop, up to our previous
		 * default of taking a 100ms nap.
		 */
		pause <<= 1;
		if (pause > HZ / 10)
			pause = HZ / 10;

		if (fatal_signal_pending(current))
			break;
	}

	if (pages_written)
		trace_mm_balancedirty_writeout(pages_written);
	if (bdi_nr_reclaimable + bdi_nr_writeback < bdi_thresh &&
			bdi->dirty_exceeded)
		bdi->dirty_exceeded = 0;

	if (ub_dirty + ub_writeback < ub_thresh &&
	    test_bit(UB_DIRTY_EXCEEDED, &ub->ub_flags))
		clear_bit(UB_DIRTY_EXCEEDED, &ub->ub_flags);

	virtinfo_notifier_call(VITYPE_IO, VIRTINFO_IO_BALANCE_DIRTY,
			       (void*)write_chunk);

	/*
	 * Even if this is filtered writeback for other ub it will write
	 * inodes for this ub, because ub->dirty_exceeded is set.
	 */
	if (writeback_in_progress(bdi))
		return;

	/*
	 * In laptop mode, we wait until hitting the higher threshold before
	 * starting background writeout, and then write out all the way down
	 * to the lower threshold.  So slow writers cause minimal disk activity.
	 *
	 * In normal mode, we start background writeout at the lower
	 * background_thresh, to keep the amount of dirty memory low.
	 */
	if ((laptop_mode && pages_written) ||
	    (!laptop_mode && ((global_page_state(NR_FILE_DIRTY)
			       + global_page_state(NR_UNSTABLE_NFS))
					  > background_thresh)))
		bdi_start_background_writeback(bdi, NULL);
	else if ((laptop_mode && pages_written) ||
		 (!laptop_mode && ub_dirty > ub_background_thresh))
		bdi_start_background_writeback(bdi, ub);
}

void set_page_dirty_balance(struct page *page, int page_mkwrite)
{
	if (set_page_dirty(page) || page_mkwrite) {
		struct address_space *mapping = page_mapping(page);

		if (mapping)
			balance_dirty_pages_ratelimited(mapping);
	}
}

static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0;

/**
 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 * @mapping: address_space which was dirtied
 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
					unsigned long nr_pages_dirtied)
{
	unsigned long ratelimit;
	unsigned long *p;

	ratelimit = ratelimit_pages;
	if (mapping->backing_dev_info->dirty_exceeded ||
	    test_bit(UB_DIRTY_EXCEEDED, &get_io_ub()->ub_flags))
		ratelimit = 8;

	/*
	 * Check the rate limiting. Also, we do not want to throttle real-time
	 * tasks in balance_dirty_pages(). Period.
	 */
	preempt_disable();
	p =  &__get_cpu_var(bdp_ratelimits);
	*p += nr_pages_dirtied;
	if (unlikely(*p >= ratelimit)) {
		ratelimit = sync_writeback_pages(*p);
		*p = 0;
		preempt_enable();
		balance_dirty_pages(mapping, ratelimit);
		return;
	}
	preempt_enable();
}
Example #11
static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
			 unsigned char __user *buf, size_t nr)
{
	unsigned char __user *b = buf;
	DECLARE_WAITQUEUE(wait, current);
	int c;
	int minimum, time;
	ssize_t retval = 0;
	ssize_t size;
	long timeout;
	unsigned long flags;
	int packet;

do_it_again:

	BUG_ON(!tty->read_buf);

	c = job_control(tty, file);
	if (c < 0)
		return c;

	minimum = time = 0;
	timeout = MAX_SCHEDULE_TIMEOUT;
	if (!tty->icanon) {
		time = (HZ / 10) * TIME_CHAR(tty);
		minimum = MIN_CHAR(tty);
		if (minimum) {
			if (time)
				tty->minimum_to_wake = 1;
			else if (!waitqueue_active(&tty->read_wait) ||
				 (tty->minimum_to_wake > minimum))
				tty->minimum_to_wake = minimum;
		} else {
			timeout = 0;
			if (time) {
				timeout = time;
				time = 0;
			}
			tty->minimum_to_wake = minimum = 1;
		}
	}

	if (file->f_flags & O_NONBLOCK) {
		if (!mutex_trylock(&tty->atomic_read_lock))
			return -EAGAIN;
	} else {
		if (mutex_lock_interruptible(&tty->atomic_read_lock))
			return -ERESTARTSYS;
	}
	packet = tty->packet;

	add_wait_queue(&tty->read_wait, &wait);
	while (nr) {
		
		if (packet && tty->link->ctrl_status) {
			unsigned char cs;
			if (b != buf)
				break;
			spin_lock_irqsave(&tty->link->ctrl_lock, flags);
			cs = tty->link->ctrl_status;
			tty->link->ctrl_status = 0;
			spin_unlock_irqrestore(&tty->link->ctrl_lock, flags);
			if (tty_put_user(tty, cs, b++)) {
				retval = -EFAULT;
				b--;
				break;
			}
			nr--;
			break;
		}
		set_current_state(TASK_INTERRUPTIBLE);

		if (((minimum - (b - buf)) < tty->minimum_to_wake) &&
		    ((minimum - (b - buf)) >= 1))
			tty->minimum_to_wake = (minimum - (b - buf));

		if (!input_available_p(tty, 0)) {
			if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
				retval = -EIO;
				break;
			}
			if (tty_hung_up_p(file))
				break;
			if (!timeout)
				break;
			if (file->f_flags & O_NONBLOCK) {
				retval = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				retval = -ERESTARTSYS;
				break;
			}
			n_tty_set_room(tty);
			timeout = schedule_timeout(timeout);
			BUG_ON(!tty->read_buf);
			continue;
		}
		__set_current_state(TASK_RUNNING);

		
		if (packet && b == buf) {
			if (tty_put_user(tty, TIOCPKT_DATA, b++)) {
				retval = -EFAULT;
				b--;
				break;
			}
			nr--;
		}

		if (tty->icanon && !L_EXTPROC(tty)) {
			
			while (nr && tty->read_cnt) {
				int eol;

				eol = test_and_clear_bit(tty->read_tail,
						tty->read_flags);
				c = tty->read_buf[tty->read_tail];
				spin_lock_irqsave(&tty->read_lock, flags);
				tty->read_tail = ((tty->read_tail+1) &
						  (N_TTY_BUF_SIZE-1));
				tty->read_cnt--;
				if (eol) {
					if (--tty->canon_data < 0)
						tty->canon_data = 0;
				}
				spin_unlock_irqrestore(&tty->read_lock, flags);

				if (!eol || (c != __DISABLED_CHAR)) {
					if (tty_put_user(tty, c, b++)) {
						retval = -EFAULT;
						b--;
						break;
					}
					nr--;
				}
				if (eol) {
					tty_audit_push(tty);
					break;
				}
			}
			if (retval)
				break;
		} else {
			int uncopied;
			uncopied = copy_from_read_buf(tty, &b, &nr);
			uncopied += copy_from_read_buf(tty, &b, &nr);
			if (uncopied) {
				retval = -EFAULT;
				break;
			}
		}

		if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) {
			n_tty_set_room(tty);
			check_unthrottle(tty);
		}

		if (b - buf >= minimum)
			break;
		if (time)
			timeout = time;
	}
	mutex_unlock(&tty->atomic_read_lock);
	remove_wait_queue(&tty->read_wait, &wait);

	if (!waitqueue_active(&tty->read_wait))
		tty->minimum_to_wake = minimum;

	__set_current_state(TASK_RUNNING);
	size = b - buf;
	if (size) {
		retval = size;
		if (nr)
			clear_bit(TTY_PUSH, &tty->flags);
	} else if (test_and_clear_bit(TTY_PUSH, &tty->flags))
		goto do_it_again;

	n_tty_set_room(tty);
	return retval;
}
Example #12
int
#else
void
#endif
sis1100_irq_thread(void* data)
{
    struct sis1100_softc* sc=(struct sis1100_softc*)data;
    enum handlercomm command;
    DECLARE_SPINLOCKFLAGS(flags);

#ifdef __linux__
#if LINUX_VERSION_CODE < 0x20600
    daemonize();
    snprintf(current->comm, sizeof(current->comm), "sis1100_%02d", sc->unit);
    SPIN_LOCK_IRQSAVE(current->sigmask_lock, flags);
    sigemptyset(&current->blocked);
    recalc_sigpending(current);
    SPIN_UNLOCK_IRQRESTORE(current->sigmask_lock, flags);
#endif
#endif /*__linux__*/

    while (1) {
#ifdef __NetBSD__
        tsleep(&sc->handler_wait, PCATCH, "thread_vmeirq", 0);
#elif __linux__
        /* prepare to sleep */
	__set_current_state(TASK_INTERRUPTIBLE);
        /* don't sleep if command!=0 */
        if (sc->handlercommand.command)
	    __set_current_state(TASK_RUNNING);
        else
            schedule();
#endif

        if (kthread_should_stop())
            return 0;

        SPIN_LOCK_IRQSAVE(sc->handlercommand.lock, flags);
        command=sc->handlercommand.command;
        sc->handlercommand.command=0;
        SPIN_UNLOCK_IRQRESTORE(sc->handlercommand.lock, flags);

#if 0
        pERROR(sc, "irq_thread: command=0x%x", command);
#endif

        _sis1100_irq_thread(sc, command);

#ifdef __linux__
	if (signal_pending (current)) {
	    SPIN_LOCK_IRQSAVE(current->SIGMASK_LOCK, flags);
	    flush_signals(current);
	    SPIN_UNLOCK_IRQRESTORE(current->SIGMASK_LOCK, flags);
	}
#endif /*__linux__*/

    }
#ifdef __linux__
    return 0;
#endif
}
Example #13
signed long schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
Example #14
/* globalfifo write operation */
static ssize_t globalfifo_write(struct file *filp, const char __user *buf,
  size_t count, loff_t *ppos)
{
  struct globalfifo_dev *dev = filp->private_data; // get the device structure pointer
  int ret;
  DECLARE_WAITQUEUE(wait, current); // declare a wait queue entry

  down(&dev->sem); // acquire the semaphore
  add_wait_queue(&dev->w_wait, &wait); // add to the write wait queue head

  /* wait until the FIFO is not full */
  if (dev->current_len == GLOBALFIFO_SIZE)
  {
    if (filp->f_flags & O_NONBLOCK)
    // non-blocking access
    {
      ret =  - EAGAIN;
      goto out;
    } 
    __set_current_state(TASK_INTERRUPTIBLE); // mark the process as sleeping
    up(&dev->sem);

    schedule(); // let other processes run
    if (signal_pending(current))
    // woken up by a signal
    {
      ret =  - ERESTARTSYS;
      goto out2;
    }

    down(&dev->sem); // re-acquire the semaphore
  }

  /* copy from user space to kernel space */
  if (count > GLOBALFIFO_SIZE - dev->current_len)
    count = GLOBALFIFO_SIZE - dev->current_len;

  if (copy_from_user(dev->mem + dev->current_len, buf, count))
  {
    ret =  - EFAULT;
    goto out;
  }
  else
  {
    dev->current_len += count;
    printk(KERN_INFO "written %d bytes(s),current_len:%d\n", count, dev
      ->current_len);

    wake_up_interruptible(&dev->r_wait); // wake up the read wait queue
    /* emit an asynchronous read signal (SIGIO) */
    if (dev->async_queue)
       kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
    
    ret = count;
  }

  out: up(&dev->sem); // release the semaphore
  out2:remove_wait_queue(&dev->w_wait, &wait); // remove from the attached wait queue head
  set_current_state(TASK_RUNNING);
  return ret;
}
Example #15
static ssize_t cmmbmemo_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
{
    struct cmmb_memory *cmmbmemo = (struct cmmb_memory*)file->private_data;
    ssize_t avail_V, avail_A, avail_D;
    ssize_t ret = 0; /* default if no data type matches */
    
    DBG("[CMMB HW]:[memory]:enter cmmb memory read\n");
    
    if (cmmbmemo->r_datatype == CMMB_VIDEO_TYPE){
#if 0         
        DECLARE_WAITQUEUE(wait, current);
        for(;;){
            avail_V = cmmb_ringbuffer_avail(&cmmbmemo->buffer_Video);
             
            if (avail_V < count){          
                add_wait_queue(&cmmbmemo->buffer_Video.queue, &wait);
                __set_current_state(TASK_INTERRUPTIBLE);
                schedule();
                remove_wait_queue(&cmmbmemo->buffer_Video.queue, &wait);
                if (signal_pending(current)){
                   ret = -ERESTARTSYS;
                   goto out2;
                }
            }
        }
#else
#if 0
        avail_V = cmmb_ringbuffer_avail(&cmmbmemo->buffer_Video);
        while (avail_V < count){
            DBG("[CMMB HW]:[memory]:cmmb memory read video data sleep!!\n");
            spin_lock(cmmbmemo->buffer_Video.lock);
            cmmbmemo->buffer_Video.condition = 0;
            spin_unlock(cmmbmemo->buffer_Video.lock);
            if (wait_event_interruptible(cmmbmemo->buffer_Video.queue, cmmbmemo->buffer_Video.condition))
                return -ERESTARTSYS;
            
            avail_V = cmmb_ringbuffer_avail(&cmmbmemo->buffer_Video);
            DBG("[CMMB HW]:[memory]:cmmb memory read video data awake\n");
        }
#endif 
	    avail_V = cmmb_ringbuffer_avail(&cmmbmemo->buffer_Video);
	    if (avail_V < count)  
	    	return 0;     
#endif          
        ret = cmmb_ringbuffer_read(&cmmbmemo->buffer_Video, buf, count, 1);   
     
        DBG("[CMMB HW]:[memory]:cmmb memory video read ret = 0x%x\n",ret);
    }else if (cmmbmemo->r_datatype == CMMB_AUDIO_TYPE){
#if 0
        DECLARE_WAITQUEUE(wait, current);
        for(;;){
            avail_A = cmmb_ringbuffer_avail(&cmmbmemo->buffer_Audio);
            if (avail_A < count){
                add_wait_queue(&cmmbmemo->buffer_Audio.queue, &wait);
                __set_current_state(TASK_INTERRUPTIBLE);
                schedule();
                remove_wait_queue(&cmmbmemo->buffer_Audio.queue, &wait);
                if (signal_pending(current)){
                    ret = -ERESTARTSYS;
                    goto out2;
                }
            }
        }
#else
#if 0
        avail_A = cmmb_ringbuffer_avail(&cmmbmemo->buffer_Audio);
        while (avail_A < count){
            DBG("[CMMB HW]:[memory]:cmmb memory read audio data sleep!!\n");
            spin_lock(cmmbmemo->buffer_Audio.lock);
            cmmbmemo->buffer_Audio.condition = 0;
            spin_unlock(cmmbmemo->buffer_Audio.lock);
            if (wait_event_interruptible(cmmbmemo->buffer_Audio.queue, cmmbmemo->buffer_Audio.condition))
                return -ERESTARTSYS;
            
            avail_A = cmmb_ringbuffer_avail(&cmmbmemo->buffer_Audio);
            DBG("[CMMB HW]:[memory]:cmmb memory read audio data awake\n");
        }
#endif
		avail_A = cmmb_ringbuffer_avail(&cmmbmemo->buffer_Audio);  
		if (avail_A < count)  
			return 0;    
#endif
        ret = cmmb_ringbuffer_read(&cmmbmemo->buffer_Audio, buf, count, 1);
    }else if(cmmbmemo->r_datatype == CMMB_DATA_TYPE){
 #if 0   
        DECLARE_WAITQUEUE(wait, current);
        for(;;){
           avail_D = cmmb_ringbuffer_avail(&cmmbmemo->buffer_Data);
           if (avail_D < count){
               add_wait_queue(&cmmbmemo->buffer_Data.queue, &wait);
               __set_current_state(TASK_INTERRUPTIBLE);
               schedule();
               remove_wait_queue(&cmmbmemo->buffer_Data.queue, &wait);
               if (signal_pending(current)){
                   ret = -ERESTARTSYS;
                   goto out2;
               }
           }
        }
#else
#if 0
        avail_D = cmmb_ringbuffer_avail(&cmmbmemo->buffer_Data);
        while (avail_D < count){
        DBG("[CMMB HW]:[memory]:cmmb memory read data sleep!!\n");
        spin_lock(cmmbmemo->buffer_Data.lock);
        cmmbmemo->buffer_Data.condition = 0;
        spin_unlock(cmmbmemo->buffer_Data.lock);
        if (wait_event_interruptible(cmmbmemo->buffer_Data.queue, cmmbmemo->buffer_Data.condition))
            return -ERESTARTSYS;
        
        avail_D= cmmb_ringbuffer_avail(&cmmbmemo->buffer_Data);
        DBG("[CMMB HW]:[memory]:cmmb memory read data awake\n");
        }
#endif
		avail_D = cmmb_ringbuffer_avail(&cmmbmemo->buffer_Data);  
		if (avail_D < count)  
			return 0;    	        
#endif
        ret = cmmb_ringbuffer_read(&cmmbmemo->buffer_Data, buf, count, 1);
    }
    
out2:
    cmmbmemo->r_datatype = CMMB_NULL_TYPE;
    return ret;
}
Example #16
static int do_tcp_rcv(struct ncp_server *server, void *buffer, size_t len) {
	poll_table wait_table;
	struct file *file;
	struct socket *sock;
	int init_timeout;
	size_t dataread;
	int result = 0;
	
	file = server->ncp_filp;
	sock = &file->f_dentry->d_inode->u.socket_i;
	
	dataread = 0;

	init_timeout = server->m.time_out * 20;
	
	/* hard-mounted volumes have no timeout, except connection close... */
	if (!(server->m.flags & NCP_MOUNT_SOFT))
		init_timeout = 0x7FFF0000;

	while (len) {
		poll_initwait(&wait_table);
		/* mb() is not necessary because ->poll() will serialize
		   instructions adding the wait_table waitqueues in the
		   waitqueue-head before going to calculate the mask-retval. */
		__set_current_state(TASK_INTERRUPTIBLE);
		if (!(sock->ops->poll(file, sock, &wait_table) & POLLIN)) {
			init_timeout = schedule_timeout(init_timeout);
			poll_freewait(&wait_table);
			current->state = TASK_RUNNING;
			if (signal_pending(current)) {
				return -ERESTARTSYS;
			}
			if (!init_timeout) {
				return -EIO;
			}
			if(wait_table.error) {
				return wait_table.error;
			}
		} else {
			poll_freewait(&wait_table);
		}
		current->state = TASK_RUNNING;

		result = _recv(sock, buffer, len, MSG_DONTWAIT);
		if (result < 0) {
			if (result == -EAGAIN) {
				DDPRINTK("ncpfs: tcp: bad select ready\n");
				continue;
			}
			return result;
		}
		if (result == 0) {
			printk(KERN_ERR "ncpfs: tcp: EOF on socket\n");
			return -EIO;
		}
		if (result > len) {
			printk(KERN_ERR "ncpfs: tcp: bug in recvmsg\n");
			return -EIO;			
		}
		dataread += result;
		buffer += result;
		len -= result;
	}
	return 0;
}	
Example #17
static int do_ncp_rpc_call(struct ncp_server *server, int size,
		struct ncp_reply_header* reply_buf, int max_reply_size)
{
	struct file *file;
	struct socket *sock;
	int result;
	char *start = server->packet;
	poll_table wait_table;
	int init_timeout, max_timeout;
	int timeout;
	int retrans;
	int major_timeout_seen;
	int acknowledge_seen;
	int n;

	/* We have to check the result, so store the complete header */
	struct ncp_request_header request =
	*((struct ncp_request_header *) (server->packet));

	struct ncp_reply_header reply;

	file = server->ncp_filp;
	sock = &file->f_dentry->d_inode->u.socket_i;

	init_timeout = server->m.time_out;
	max_timeout = NCP_MAX_RPC_TIMEOUT;
	retrans = server->m.retry_count;
	major_timeout_seen = 0;
	acknowledge_seen = 0;

	for (n = 0, timeout = init_timeout;; n++, timeout <<= 1) {
		/*
		DDPRINTK("ncpfs: %08lX:%02X%02X%02X%02X%02X%02X:%04X\n",
			 htonl(server->m.serv_addr.sipx_network),
			 server->m.serv_addr.sipx_node[0],
			 server->m.serv_addr.sipx_node[1],
			 server->m.serv_addr.sipx_node[2],
			 server->m.serv_addr.sipx_node[3],
			 server->m.serv_addr.sipx_node[4],
			 server->m.serv_addr.sipx_node[5],
			 ntohs(server->m.serv_addr.sipx_port));
		*/
		DDPRINTK("ncpfs: req.typ: %04X, con: %d, "
			 "seq: %d",
			 request.type,
			 (request.conn_high << 8) + request.conn_low,
			 request.sequence);
		DDPRINTK(" func: %d\n",
			 request.function);

		result = _send(sock, (void *) start, size);
		if (result < 0) {
			printk(KERN_ERR "ncp_rpc_call: send error = %d\n", result);
			break;
		}
	      re_select:
		poll_initwait(&wait_table);
		/* mb() is not necessary because ->poll() will serialize
		   instructions adding the wait_table waitqueues in the
		   waitqueue-head before going to calculate the mask-retval. */
		__set_current_state(TASK_INTERRUPTIBLE);
		if (!(sock->ops->poll(file, sock, &wait_table) & POLLIN)) {
			int timed_out;
			if (timeout > max_timeout) {
				/* JEJB/JSP 2/7/94
				 * This is useful to see if the system is
				 * hanging */
				if (acknowledge_seen == 0) {
					printk(KERN_WARNING "NCP max timeout\n");
				}
				timeout = max_timeout;
			}
			timed_out = !schedule_timeout(timeout);
			poll_freewait(&wait_table);
			current->state = TASK_RUNNING;
			if (signal_pending(current)) {
				result = -ERESTARTSYS;
				break;
			}
			if(wait_table.error) {
				result = wait_table.error;
				break;
			}
			if (timed_out) {
				if (n < retrans)
					continue;
				if (server->m.flags & NCP_MOUNT_SOFT) {
					printk(KERN_WARNING "NCP server not responding\n");
					result = -EIO;
					break;
				}
				n = 0;
				timeout = init_timeout;
				if (init_timeout < max_timeout)
					init_timeout <<= 1;
				if (!major_timeout_seen) {
					printk(KERN_WARNING "NCP server not responding\n");
				}
				major_timeout_seen = 1;
				continue;
			}
		} else {
			poll_freewait(&wait_table);
		}
		current->state = TASK_RUNNING;

		/* Get the header from the next packet using a peek, so keep it
		 * on the recv queue.  If it is wrong, it will be some reply
		 * we no longer need, so discard it */
		result = _recv(sock, (void *) &reply, sizeof(reply),
			       MSG_PEEK | MSG_DONTWAIT);
		if (result < 0) {
			if (result == -EAGAIN) {
				DDPRINTK("ncp_rpc_call: bad select ready\n");
				goto re_select;
			}
			if (result == -ECONNREFUSED) {
				DPRINTK("ncp_rpc_call: server playing coy\n");
				goto re_select;
			}
			if (result != -ERESTARTSYS) {
				printk(KERN_ERR "ncp_rpc_call: recv error = %d\n",
				       -result);
			}
			break;
		}
		if ((result == sizeof(reply))
		    && (reply.type == NCP_POSITIVE_ACK)) {
			/* Throw away the packet */
			DPRINTK("ncp_rpc_call: got positive acknowledge\n");
			_recv(sock, (void *) &reply, sizeof(reply),
			      MSG_DONTWAIT);
			n = 0;
			timeout = max_timeout;
			acknowledge_seen = 1;
			goto re_select;
		}
		DDPRINTK("ncpfs: rep.typ: %04X, con: %d, tsk: %d,"
			 "seq: %d\n",
			 reply.type,
			 (reply.conn_high << 8) + reply.conn_low,
			 reply.task,
			 reply.sequence);

		if ((result >= sizeof(reply))
		    && (reply.type == NCP_REPLY)
		    && ((request.type == NCP_ALLOC_SLOT_REQUEST)
			|| ((reply.sequence == request.sequence)
			    && (reply.conn_low == request.conn_low)
/* seem to get wrong task from NW311 && (reply.task      == request.task) */
			    && (reply.conn_high == request.conn_high)))) {
			if (major_timeout_seen)
				printk(KERN_NOTICE "NCP server OK\n");
			break;
		}
		/* JEJB/JSP 2/7/94
		 * we have xid mismatch, so discard the packet and start
		 * again.  What a hack! but I can't call recvfrom with
		 * a null buffer yet. */
		_recv(sock, (void *) &reply, sizeof(reply), MSG_DONTWAIT);

		DPRINTK("ncp_rpc_call: reply mismatch\n");
		goto re_select;
	}
	/* 
	 * we have the correct reply, so read into the correct place and
	 * return it
	 */
	result = _recv(sock, (void *)reply_buf, max_reply_size, MSG_DONTWAIT);
	if (result < 0) {
		printk(KERN_WARNING "NCP: notice message: result=%d\n", result);
	} else if (result < sizeof(struct ncp_reply_header)) {
		printk(KERN_ERR "NCP: just caught a too small read memory size..., "
		       "email to NET channel\n");
		printk(KERN_ERR "NCP: result=%d\n", result);
		result = -EIO;
	}

	return result;
}
Example #18
void ide_config(dev_link_t *link)
{
    client_handle_t handle = link->handle;
    ide_info_t *info = link->priv;
    tuple_t tuple;
    u_short buf[128];
    cisparse_t parse;
    config_info_t conf;
    cistpl_cftable_entry_t *cfg = &parse.cftable_entry;
    cistpl_cftable_entry_t dflt = { 0 };
    int i, pass, last_ret, last_fn, hd, is_kme = 0;
    unsigned long io_base, ctl_base;

    DEBUG(0, "ide_config(0x%p)\n", link);
    
    tuple.TupleData = (cisdata_t *)buf;
    tuple.TupleOffset = 0; tuple.TupleDataMax = 255;
    tuple.Attributes = 0;
    tuple.DesiredTuple = CISTPL_CONFIG;
    CS_CHECK(GetFirstTuple, handle, &tuple);
    CS_CHECK(GetTupleData, handle, &tuple);
    CS_CHECK(ParseTuple, handle, &tuple, &parse);
    link->conf.ConfigBase = parse.config.base;
    link->conf.Present = parse.config.rmask[0];

    tuple.DesiredTuple = CISTPL_MANFID;
    if (!CardServices(GetFirstTuple, handle, &tuple) &&
	!CardServices(GetTupleData, handle, &tuple) &&
	!CardServices(ParseTuple, handle, &tuple, &parse))
	is_kme = ((parse.manfid.manf == MANFID_KME) &&
		  ((parse.manfid.card == PRODID_KME_KXLC005_A) ||
		   (parse.manfid.card == PRODID_KME_KXLC005_B)));

    /* Configure card */
    link->state |= DEV_CONFIG;

    /* Not sure if this is right... look up the current Vcc */
    CS_CHECK(GetConfigurationInfo, handle, &conf);
    link->conf.Vcc = conf.Vcc;
    
    pass = io_base = ctl_base = 0;
    tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
    tuple.Attributes = 0;
    CS_CHECK(GetFirstTuple, handle, &tuple);
    while (1) {
	CFG_CHECK(GetTupleData, handle, &tuple);
	CFG_CHECK(ParseTuple, handle, &tuple, &parse);

	/* Check for matching Vcc, unless we're desperate */
	if (!pass) {
	    if (cfg->vcc.present & (1<<CISTPL_POWER_VNOM)) {
		if (conf.Vcc != cfg->vcc.param[CISTPL_POWER_VNOM]/10000)
		    goto next_entry;
	    } else if (dflt.vcc.present & (1<<CISTPL_POWER_VNOM)) {
		if (conf.Vcc != dflt.vcc.param[CISTPL_POWER_VNOM]/10000)
		    goto next_entry;
	    }
	}
	
	if (cfg->vpp1.present & (1<<CISTPL_POWER_VNOM))
	    link->conf.Vpp1 = link->conf.Vpp2 =
		cfg->vpp1.param[CISTPL_POWER_VNOM]/10000;
	else if (dflt.vpp1.present & (1<<CISTPL_POWER_VNOM))
	    link->conf.Vpp1 = link->conf.Vpp2 =
		dflt.vpp1.param[CISTPL_POWER_VNOM]/10000;
	
	if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) {
	    cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io;
	    link->conf.ConfigIndex = cfg->index;
	    link->io.BasePort1 = io->win[0].base;
	    link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
	    if (!(io->flags & CISTPL_IO_16BIT))
		link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
	    if (io->nwin == 2) {
		link->io.NumPorts1 = 8;
		link->io.BasePort2 = io->win[1].base;
		link->io.NumPorts2 = (is_kme) ? 2 : 1;
		CFG_CHECK(RequestIO, link->handle, &link->io);
		io_base = link->io.BasePort1;
		ctl_base = link->io.BasePort2;
	    } else if ((io->nwin == 1) && (io->win[0].len >= 16)) {
		link->io.NumPorts1 = io->win[0].len;
		link->io.NumPorts2 = 0;
		CFG_CHECK(RequestIO, link->handle, &link->io);
		io_base = link->io.BasePort1;
		ctl_base = link->io.BasePort1+0x0e;
	    } else goto next_entry;
	    /* If we've got this far, we're done */
	    break;
	}
	
    next_entry:
	if (cfg->flags & CISTPL_CFTABLE_DEFAULT) dflt = *cfg;
	if (pass) {
	    CS_CHECK(GetNextTuple, handle, &tuple);
	} else if (CardServices(GetNextTuple, handle, &tuple) != 0) {
	    CS_CHECK(GetFirstTuple, handle, &tuple);
	    memset(&dflt, 0, sizeof(dflt));
	    pass++;
	}
    }
    
    CS_CHECK(RequestIRQ, handle, &link->irq);
    CS_CHECK(RequestConfiguration, handle, &link->conf);

    /* deal with brain dead IDE resource management */
    release_region(link->io.BasePort1, link->io.NumPorts1);
    if (link->io.NumPorts2)
	release_region(link->io.BasePort2, link->io.NumPorts2);

    /* disable drive interrupts during IDE probe */
    outb(0x02, ctl_base);

    /* special setup for KXLC005 card */
    if (is_kme) outb(0x81, ctl_base+1);

    /* retry registration in case device is still spinning up */
    for (hd = -1, i = 0; i < 10; i++) {
	hd = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ);
	if (hd >= 0) break;
	if (link->io.NumPorts1 == 0x20) {
	    outb(0x02, ctl_base+0x10);
	    hd = idecs_register(io_base+0x10, ctl_base+0x10,
				link->irq.AssignedIRQ);
	    if (hd >= 0) {
		io_base += 0x10; ctl_base += 0x10;
		break;
	    }
	}
	__set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(HZ/10);
    }
    
    if (hd < 0) {
	printk(KERN_NOTICE "ide-cs: ide_register() at 0x%3lx & 0x%3lx"
	       ", irq %u failed\n", io_base, ctl_base,
	       link->irq.AssignedIRQ);
	goto failed;
    }

    MOD_INC_USE_COUNT;
    info->ndev = 1;
    sprintf(info->node.dev_name, "hd%c", 'a'+(hd*2));
    info->node.major = ide_major[hd];
    info->node.minor = 0;
    info->hd = hd;
    link->dev = &info->node;
    printk(KERN_INFO "ide-cs: %s: Vcc = %d.%d, Vpp = %d.%d\n",
	   info->node.dev_name, link->conf.Vcc/10, link->conf.Vcc%10,
	   link->conf.Vpp1/10, link->conf.Vpp1%10);

    link->state &= ~DEV_CONFIG_PENDING;
    return;
    
cs_failed:
    cs_error(link->handle, last_fn, last_ret);
failed:
    ide_release(link);
    link->state &= ~DEV_CONFIG_PENDING;

} /* ide_config */
Example #19
static int cachemiss_thread (void *data)
{
	tux_req_t *req;
	struct k_sigaction *ka;
	DECLARE_WAITQUEUE(wait, current);
	iothread_t *iot = data;
	int nr = iot->ti->cpu, wake_up;

	Dprintk("iot %p/%p got started.\n", iot, current);
	drop_permissions();

	spin_lock(&iot->async_lock);
	iot->threads++;
	sprintf(current->comm, "async IO %d/%d", nr, iot->threads);


	spin_lock_irq(&current->sighand->siglock);
	ka = current->sighand->action + SIGCHLD-1;
	ka->sa.sa_handler = SIG_IGN;
	siginitsetinv(&current->blocked, sigmask(SIGCHLD));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	spin_unlock(&iot->async_lock);
#ifdef CONFIG_SMP
	{
		cpumask_t mask;

		if (cpu_isset(nr, cpu_online_map)) {
			cpus_clear(mask);
			cpu_set(nr, mask);
			set_cpus_allowed(current, mask);
		}

	}
#endif

	add_wait_queue_exclusive(&iot->async_sleep, &wait);

	for (;;) {
		while (!list_empty(&iot->async_queue) &&
				(req = get_cachemiss(iot))) {

			if (!req->atom_idx) {
				add_tux_atom(req, flush_request);
				add_req_to_workqueue(req);
				continue;
			}
			tux_schedule_atom(req, 1);
			if (signal_pending(current))
				flush_all_signals();
		}
		if (signal_pending(current))
			flush_all_signals();
		if (!list_empty(&iot->async_queue))
			continue;
		if (iot->shutdown) {
			Dprintk("iot %p/%p got shutdown!\n", iot, current);
			break;
		}
		__set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&iot->async_queue)) {
			Dprintk("iot %p/%p going to sleep.\n", iot, current);
			schedule();
			Dprintk("iot %p/%p got woken up.\n", iot, current);
		}
		__set_current_state(TASK_RUNNING);
	}

	remove_wait_queue(&iot->async_sleep, &wait);

	wake_up = 0;
	spin_lock(&iot->async_lock);
	if (!--iot->threads)
		wake_up = 1;
	spin_unlock(&iot->async_lock);
	Dprintk("iot %p/%p has finished shutdown!\n", iot, current);
	if (wake_up) {
		Dprintk("iot %p/%p waking up master.\n", iot, current);
		wake_up(&iot->wait_shutdown);
	}

	return 0;
}
Example #20
// study this one carefully
static int usb_stor_control_thread(void * __us)
{

	struct us_data *us = (struct us_data *)__us;
	struct Scsi_Host *host = us_to_host(us);
//---------------------------
pr_info("7 usb stor control thread\n");

	for(;;) {
		US_DEBUGP("*** thread sleeping.\n");
		if (wait_for_completion_interruptible(&us->cmnd_ready))
			break;

		US_DEBUGP("*** thread awakened.\n");

		/* lock the device pointers */
		mutex_lock(&(us->dev_mutex));

		/* lock access to the state */
		scsi_lock(host);

		/* When we are called with no command pending, we're done */
		if (us->srb == NULL) {
			scsi_unlock(host);
			mutex_unlock(&us->dev_mutex);
			US_DEBUGP("-- exiting\n");
			break;
		}

		/* has the command timed out *already* ? */
		if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
			us->srb->result = DID_ABORT << 16;
			goto SkipForAbort;
		}

		scsi_unlock(host);

		/* reject the command if the direction indicator 
		 * is UNKNOWN
		 */
		if (us->srb->sc_data_direction == DMA_BIDIRECTIONAL) {
			US_DEBUGP("UNKNOWN data direction\n");
			us->srb->result = DID_ERROR << 16;
		}

		/* reject if target != 0 or if LUN is higher than
		 * the maximum known LUN
		 */
		else if (us->srb->device->id && 
				!(us->fflags & US_FL_SCM_MULT_TARG)) {
			US_DEBUGP("Bad target number (%d:%d)\n",
				  us->srb->device->id, us->srb->device->lun);
			us->srb->result = DID_BAD_TARGET << 16;
		}

		else if (us->srb->device->lun > us->max_lun) {
			US_DEBUGP("Bad LUN (%d:%d)\n",
				  us->srb->device->id, us->srb->device->lun);
			us->srb->result = DID_BAD_TARGET << 16;
		}

		/* Handle those devices which need us to fake 
		 * their inquiry data */
		else if ((us->srb->cmnd[0] == INQUIRY) &&
			    (us->fflags & US_FL_FIX_INQUIRY)) {
			unsigned char data_ptr[36] = {
			    0x00, 0x80, 0x02, 0x02,
			    0x1F, 0x00, 0x00, 0x00};

			US_DEBUGP("Faking INQUIRY command\n");
			fill_inquiry_response(us, data_ptr, 36);
			us->srb->result = SAM_STAT_GOOD;
		}

		/* we've got a command, let's do it! */
		else {
			US_DEBUG(usb_stor_show_command(us->srb));
			us->proto_handler(us->srb, us);
			usb_mark_last_busy(us->pusb_dev);
		}

		/* lock access to the state */
		scsi_lock(host);

		/* indicate that the command is done */
		if (us->srb->result != DID_ABORT << 16) {
			US_DEBUGP("scsi cmd done, result=0x%x\n", 
				   us->srb->result);
			us->srb->scsi_done(us->srb);
		} else {
SkipForAbort:
			US_DEBUGP("scsi command aborted\n");
		}

		/* If an abort request was received we need to signal that
		 * the abort has finished.  The proper test for this is
		 * the TIMED_OUT flag, not srb->result == DID_ABORT, because
		 * the timeout might have occurred after the command had
		 * already completed with a different result code. */
		if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
			complete(&(us->notify));

			/* Allow USB transfers to resume */
			clear_bit(US_FLIDX_ABORTING, &us->dflags);
			clear_bit(US_FLIDX_TIMED_OUT, &us->dflags);
		}

		/* finished working on this command */
		us->srb = NULL;
		scsi_unlock(host);

		/* unlock the device pointers */
		mutex_unlock(&us->dev_mutex);
	} /* for (;;) */

	/* Wait until we are told to stop */
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}	
Example #21
static int shrink_cache(int nr_pages, zone_t * classzone, unsigned int gfp_mask, int priority)
{
	struct list_head * entry;
	int max_scan = nr_inactive_pages / priority;
	int max_mapped = min((nr_pages << (10 - priority)), max_scan / 10);

	spin_lock(&pagemap_lru_lock);
	while (--max_scan >= 0 && (entry = inactive_list.prev) != &inactive_list) {
		struct page * page;

		/* lock depth is 1 or 2 */
		if (unlikely(current->need_resched)) {
			spin_unlock(&pagemap_lru_lock);
			__set_current_state(TASK_RUNNING);
			schedule();
			spin_lock(&pagemap_lru_lock);
			continue;
		}

		page = list_entry(entry, struct page, lru);

		if (unlikely(!PageLRU(page)))
			BUG();
		if (unlikely(PageActive(page)))
			BUG();

		list_del(entry);
		list_add(entry, &inactive_list);

		/*
		 * Zero page counts can happen because we unlink the pages
		 * _after_ decrementing the usage count..
		 */
		if (unlikely(!page_count(page)))
			continue;

		if (!memclass(page->zone, classzone))
			continue;

		/* Racy check to avoid trylocking when not worthwhile */
		if (!page->buffers && (page_count(page) != 1 || !page->mapping))
			goto page_mapped;

		/*
		 * The page is locked. IO in progress?
		 * Move it to the back of the list.
		 */
		if (unlikely(TryLockPage(page))) {
			if (PageLaunder(page) && (gfp_mask & __GFP_FS)) {
				page_cache_get(page);
				spin_unlock(&pagemap_lru_lock);
				wait_on_page(page);
				page_cache_release(page);
				spin_lock(&pagemap_lru_lock);
			}
			continue;
		}

		if ((PageDirty(page) || DelallocPage(page)) && is_page_cache_freeable(page) && page->mapping) {
			/*
			 * It is not critical here to write it only if
			 * the page is unmapped because any direct writer
			 * like O_DIRECT would set the PG_dirty bitflag
			 * on the physical page after having successfully
			 * pinned it and after the I/O to the page is finished,
			 * so the direct writes to the page cannot get lost.
			 */
			int (*writepage)(struct page *);

			writepage = page->mapping->a_ops->writepage;
			if ((gfp_mask & __GFP_FS) && writepage) {
				ClearPageDirty(page);
				SetPageLaunder(page);
				page_cache_get(page);
				spin_unlock(&pagemap_lru_lock);

				writepage(page);
				page_cache_release(page);

				spin_lock(&pagemap_lru_lock);
				continue;
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 */
		if (page->buffers) {
			spin_unlock(&pagemap_lru_lock);

			/* avoid to free a locked page */
			page_cache_get(page);

			if (try_to_release_page(page, gfp_mask)) {
				if (!page->mapping) {
					/*
					 * We must not allow an anon page
					 * with no buffers to be visible on
					 * the LRU, so we unlock the page after
					 * taking the lru lock
					 */
					spin_lock(&pagemap_lru_lock);
					UnlockPage(page);
					__lru_cache_del(page);

					/* effectively free the page here */
					page_cache_release(page);

					if (--nr_pages)
						continue;
					break;
				} else {
					/*
					 * The page is still in pagecache so undo the stuff
					 * before the try_to_release_page since we've not
					 * finished and we can now try the next step.
					 */
					page_cache_release(page);

					spin_lock(&pagemap_lru_lock);
				}
			} else {
				/* failed to drop the buffers so stop here */
				UnlockPage(page);
				page_cache_release(page);

				spin_lock(&pagemap_lru_lock);
				continue;
			}
		}

		spin_lock(&pagecache_lock);

		/*
		 * this is the non-racy check for busy page.
		 */
		if (!page->mapping || !is_page_cache_freeable(page)) {
			spin_unlock(&pagecache_lock);
			UnlockPage(page);
page_mapped:
			if (--max_mapped >= 0)
				continue;

			/*
			 * Alert! We've found too many mapped pages on the
			 * inactive list, so we start swapping out now!
			 */
			spin_unlock(&pagemap_lru_lock);
			swap_out(priority, gfp_mask, classzone);
			return nr_pages;
		}

		/*
		 * It is critical to check PageDirty _after_ we made sure
		 * the page is freeable, so it is not in use by anybody.
		 */
		if (PageDirty(page)) {
			spin_unlock(&pagecache_lock);
			UnlockPage(page);
			continue;
		}

		/* point of no return */
		if (likely(!PageSwapCache(page))) {
			__remove_inode_page(page);
			spin_unlock(&pagecache_lock);
		} else {
			swp_entry_t swap;
			swap.val = page->index;
			__delete_from_swap_cache(page);
			spin_unlock(&pagecache_lock);
			swap_free(swap);
		}

		__lru_cache_del(page);
		UnlockPage(page);

		/* effectively free the page here */
		page_cache_release(page);

		if (--nr_pages)
			continue;
		break;
	}
	spin_unlock(&pagemap_lru_lock);

	return nr_pages;
}
Example #22
static ssize_t read_chan(struct tty_struct *tty, struct file *file,
			 unsigned char __user *buf, size_t nr)
{
	unsigned char __user *b = buf;
	DECLARE_WAITQUEUE(wait, current);
	int c;
	int minimum, time;
	ssize_t retval = 0;
	ssize_t size;
	long timeout;
	unsigned long flags;

do_it_again:

	if (!tty->read_buf) {
		printk("n_tty_read_chan: called with read_buf == NULL?!?\n");
		return -EIO;
	}

	c = job_control(tty, file);
	if (c < 0)
		return c;
	
	minimum = time = 0;
	timeout = MAX_SCHEDULE_TIMEOUT;
	if (!tty->icanon) {
		time = (HZ / 10) * TIME_CHAR(tty);
		minimum = MIN_CHAR(tty);
		if (minimum) {
			if (time)
				tty->minimum_to_wake = 1;
			else if (!waitqueue_active(&tty->read_wait) ||
				 (tty->minimum_to_wake > minimum))
				tty->minimum_to_wake = minimum;
		} else {
			timeout = 0;
			if (time) {
				timeout = time;
				time = 0;
			}
			tty->minimum_to_wake = minimum = 1;
		}
	}

	/*
	 *	Internal serialization of reads.
	 */
	if (tx_cache_get_file_ro(file)->f_flags & O_NONBLOCK) {
		if (!mutex_trylock(&tty->atomic_read_lock))
			return -EAGAIN;
	}
	else {
		if (mutex_lock_interruptible(&tty->atomic_read_lock))
			return -ERESTARTSYS;
	}

	add_wait_queue(&tty->read_wait, &wait);
	while (nr) {
		/* First test for status change. */
		if (tty->packet && tty->link->ctrl_status) {
			unsigned char cs;
			if (b != buf)
				break;
			cs = tty->link->ctrl_status;
			tty->link->ctrl_status = 0;
			if (put_user(cs, b++)) {
				retval = -EFAULT;
				b--;
				break;
			}
			nr--;
			break;
		}
		/* This statement must be first before checking for input
		   so that any interrupt will set the state back to
		   TASK_RUNNING. */
		set_current_state(TASK_INTERRUPTIBLE);
		
		if (((minimum - (b - buf)) < tty->minimum_to_wake) &&
		    ((minimum - (b - buf)) >= 1))
			tty->minimum_to_wake = (minimum - (b - buf));
		
		if (!input_available_p(tty, 0)) {
			if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
				retval = -EIO;
				break;
			}
			if (tty_hung_up_p(file))
				break;
			if (!timeout)
				break;
			if (tx_cache_get_file_ro(file)->f_flags & O_NONBLOCK) {
				retval = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				retval = -ERESTARTSYS;
				break;
			}
			n_tty_set_room(tty);
			timeout = schedule_timeout(timeout);
			continue;
		}
		__set_current_state(TASK_RUNNING);

		/* Deal with packet mode. */
		if (tty->packet && b == buf) {
			if (put_user(TIOCPKT_DATA, b++)) {
				retval = -EFAULT;
				b--;
				break;
			}
			nr--;
		}

		if (tty->icanon) {
			/* N.B. avoid overrun if nr == 0 */
			while (nr && tty->read_cnt) {
 				int eol;

				eol = test_and_clear_bit(tty->read_tail,
						tty->read_flags);
				c = tty->read_buf[tty->read_tail];
				spin_lock_irqsave(&tty->read_lock, flags);
				tty->read_tail = ((tty->read_tail+1) &
						  (N_TTY_BUF_SIZE-1));
				tty->read_cnt--;
				if (eol) {
					/* this test should be redundant:
					 * we shouldn't be reading data if
					 * canon_data is 0
					 */
					if (--tty->canon_data < 0)
						tty->canon_data = 0;
				}
				spin_unlock_irqrestore(&tty->read_lock, flags);

				if (!eol || (c != __DISABLED_CHAR)) {
					if (put_user(c, b++)) {
						retval = -EFAULT;
						b--;
						break;
					}
					nr--;
				}
				if (eol)
					break;
			}
			if (retval)
				break;
		} else {
			int uncopied;
			uncopied = copy_from_read_buf(tty, &b, &nr);
			uncopied += copy_from_read_buf(tty, &b, &nr);
			if (uncopied) {
				retval = -EFAULT;
				break;
			}
		}

		/* If there is enough space in the read buffer now, let the
		 * low-level driver know. We use n_tty_chars_in_buffer() to
		 * check the buffer, as it now knows about canonical mode.
		 * Otherwise, if the driver is throttled and the line is
		 * longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode,
		 * we won't get any more characters.
		 */
		if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) {
			n_tty_set_room(tty);
			check_unthrottle(tty);
		}

		if (b - buf >= minimum)
			break;
		if (time)
			timeout = time;
	}
	mutex_unlock(&tty->atomic_read_lock);
	remove_wait_queue(&tty->read_wait, &wait);

	if (!waitqueue_active(&tty->read_wait))
		tty->minimum_to_wake = minimum;

	__set_current_state(TASK_RUNNING);
	size = b - buf;
	if (size) {
		retval = size;
		if (nr)
	       		clear_bit(TTY_PUSH, &tty->flags);
	} else if (test_and_clear_bit(TTY_PUSH, &tty->flags))
		 goto do_it_again;

	n_tty_set_room(tty);

	return retval;
}
Example #23
static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
			   const unsigned char *buf, size_t nr)
{
	const unsigned char *b = buf;
	DECLARE_WAITQUEUE(wait, current);
	int c;
	ssize_t retval = 0;

	
	if (L_TOSTOP(tty) && file->f_op->write != redirected_tty_write) {
		retval = tty_check_change(tty);
		if (retval)
			return retval;
	}

	
	process_echoes(tty);

	add_wait_queue(&tty->write_wait, &wait);
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (tty_hung_up_p(file) || (tty->link && !tty->link->count)) {
			retval = -EIO;
			break;
		}
		if (O_OPOST(tty) && !(test_bit(TTY_HW_COOK_OUT, &tty->flags))) {
			while (nr > 0) {
				ssize_t num = process_output_block(tty, b, nr);
				if (num < 0) {
					if (num == -EAGAIN)
						break;
					retval = num;
					goto break_out;
				}
				b += num;
				nr -= num;
				if (nr == 0)
					break;
				c = *b;
				if (process_output(c, tty) < 0)
					break;
				b++; nr--;
			}
			if (tty->ops->flush_chars)
				tty->ops->flush_chars(tty);
		} else {
			while (nr > 0) {
				c = tty->ops->write(tty, b, nr);
				if (c < 0) {
					retval = c;
					goto break_out;
				}
				if (!c)
					break;
				b += c;
				nr -= c;
			}
		}
		if (!nr)
			break;
		if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			break;
		}
		schedule();
	}
break_out:
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&tty->write_wait, &wait);
	if (b - buf != nr && tty->fasync)
		set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
	return (b - buf) ? b - buf : retval;
}
Example #24
static ssize_t write_chan(struct tty_struct * tty, struct file * file,
			  const unsigned char * buf, size_t nr)
{
	const unsigned char *b = buf;
	DECLARE_WAITQUEUE(wait, current);
	int c;
	ssize_t retval = 0;
	int transaction_commit = committing_transaction();

	/* Job control check -- must be done at start (POSIX.1 7.1.1.4). */
	if ((!transaction_commit) && 
	    L_TOSTOP(tty) && file->f_op->write != redirected_tty_write) {
		retval = tty_check_change(tty);
		if (retval)
			return retval;
	}

	add_wait_queue(&tty->write_wait, &wait);
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if ((!transaction_commit) &&
		    (tty_hung_up_p(file) || (tty->link && !tty->link->count))) {
			retval = -EIO;
			break;
		}
		if (O_OPOST(tty) && !(test_bit(TTY_HW_COOK_OUT, &tty->flags))) {
			while (nr > 0) {
				ssize_t num = opost_block(tty, b, nr);
				if (num < 0) {
					if (num == -EAGAIN)
						break;
					retval = num;
					goto break_out;
				}
				b += num;
				nr -= num;
				if (nr == 0)
					break;
				c = *b;
				if (opost(c, tty) < 0)
					break;
				b++; nr--;
			}
			if (tty->driver->flush_chars)
				tty->driver->flush_chars(tty);
		} else {
			while (nr > 0) {
				c = tty->driver->write(tty, b, nr);
				if (c < 0) {
					retval = c;
					goto break_out;
				}
				if (!c)
					break;
				b += c;
				nr -= c;
			}
		}
		if (!nr)
			break;
		if ((!transaction_commit) &&
		    (tx_cache_get_file_ro(file)->f_flags & O_NONBLOCK)) {
			retval = -EAGAIN;
			break;
		}
		schedule();
	}
break_out:
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&tty->write_wait, &wait);
	return (b - buf) ? b - buf : retval;
}
Example #25
/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then the writeback threads are woken to
 * perform some writeout.
 */
static void balance_dirty_pages(struct address_space *mapping,
				unsigned long write_chunk)
{
	long nr_reclaimable, bdi_nr_reclaimable;
	long nr_writeback, bdi_nr_writeback;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long bdi_thresh;
	unsigned long pages_written = 0;
	unsigned long pause = 1;
	bool dirty_exceeded = false;
	struct backing_dev_info *bdi = mapping->backing_dev_info;

	for (;;) {
		struct writeback_control wbc = {
			.sync_mode	= WB_SYNC_NONE,
			.older_than_this = NULL,
			.nr_to_write	= write_chunk,
			.range_cyclic	= 1,
		};

		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
					global_page_state(NR_UNSTABLE_NFS);
		nr_writeback = global_page_state(NR_WRITEBACK);

		global_dirty_limits(&background_thresh, &dirty_thresh);

		/*
		 * Throttle it only when the background writeback cannot
		 * catch-up. This avoids (excessively) small writeouts
		 * when the bdi limits are ramping up.
		 */
		if (nr_reclaimable + nr_writeback <=
				(background_thresh + dirty_thresh) / 2)
			break;

		bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
		bdi_thresh = task_dirty_limit(current, bdi_thresh);

		/*
		 * In order to avoid the stacked BDI deadlock we need
		 * to ensure we accurately count the 'dirty' pages when
		 * the threshold is low.
		 *
		 * Otherwise it would be possible to get thresh+n pages
		 * reported dirty, even though there are thresh-m pages
		 * actually dirty; with m+n sitting in the percpu
		 * deltas.
		 */
		if (bdi_thresh < 2*bdi_stat_error(bdi)) {
			bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
			bdi_nr_writeback = bdi_stat_sum(bdi, BDI_WRITEBACK);
		} else {
			bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
			bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
		}

		/*
		 * The bdi thresh is a "soft" limit derived from the global
		 * "hard" limit.  The former helps to prevent a heavy-IO bdi
		 * or process from holding back light ones; the latter is the
		 * last-resort safeguard.
		 */
		dirty_exceeded =
			(bdi_nr_reclaimable + bdi_nr_writeback > bdi_thresh)
			|| (nr_reclaimable + nr_writeback > dirty_thresh);

		if (!dirty_exceeded)
			break;

		if (!bdi->dirty_exceeded)
			bdi->dirty_exceeded = 1;

		/* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
		 * Unstable writes are a feature of certain networked
		 * filesystems (i.e. NFS) in which data may have been
		 * written to the server's write cache, but has not yet
		 * been flushed to permanent storage.
		 * Only move pages to writeback if this bdi is over its
		 * threshold otherwise wait until the disk writes catch
		 * up.
		 */
		trace_wbc_balance_dirty_start(&wbc, bdi);
		if (bdi_nr_reclaimable > bdi_thresh) {
			writeback_inodes_wb(&bdi->wb, &wbc);
			pages_written += write_chunk - wbc.nr_to_write;
			trace_wbc_balance_dirty_written(&wbc, bdi);
			if (pages_written >= write_chunk)
				break;		/* We've done our duty */
		}
		trace_wbc_balance_dirty_wait(&wbc, bdi);
		__set_current_state(TASK_UNINTERRUPTIBLE);
		io_schedule_timeout(pause);

		/*
		 * Increase the delay for each loop, up to our previous
		 * default of taking a 100ms nap.
		 */
		pause <<= 1;
		if (pause > HZ / 10)
			break;
	}

	if (!dirty_exceeded && bdi->dirty_exceeded)
		bdi->dirty_exceeded = 0;

	if (writeback_in_progress(bdi))
		return;

	/*
	 * In laptop mode, we wait until hitting the higher threshold before
	 * starting background writeout, and then write out all the way down
	 * to the lower threshold.  So slow writers cause minimal disk activity.
	 *
	 * In normal mode, we start background writeout at the lower
	 * background_thresh, to keep the amount of dirty memory low.
	 */
	if ((laptop_mode && pages_written) ||
	    (!laptop_mode && (nr_reclaimable > background_thresh)))
		bdi_start_background_writeback(bdi);
}
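
/*
 * A minimal sketch of the backoff pattern used in the loop above; the
 * predicate still_over_limit() is invented.  The caller sleeps in
 * doubling increments, capped at the historical 100ms nap, instead of
 * spinning on the dirty counters.
 */
static void throttle_with_backoff(bool (*still_over_limit)(void))
{
	unsigned long pause = 1;

	while (still_over_limit()) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		io_schedule_timeout(pause);
		pause <<= 1;
		if (pause > HZ / 10)
			break;
	}
}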

void set_page_dirty_balance(struct page *page, int page_mkwrite)
{
	if (set_page_dirty(page) || page_mkwrite) {
		struct address_space *mapping = page_mapping(page);

		if (mapping)
			balance_dirty_pages_ratelimited(mapping);
	}
}

static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0;

/**
 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 * @mapping: address_space which was dirtied
 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
					unsigned long nr_pages_dirtied)
{
	unsigned long ratelimit;
	unsigned long *p;

	ratelimit = ratelimit_pages;
	if (mapping->backing_dev_info->dirty_exceeded)
		ratelimit = 8;

	/*
	 * Check the rate limiting. Also, we do not want to throttle real-time
	 * tasks in balance_dirty_pages(). Period.
	 */
	preempt_disable();
	p =  &__get_cpu_var(bdp_ratelimits);
	*p += nr_pages_dirtied;
	if (unlikely(*p >= ratelimit)) {
		ratelimit = sync_writeback_pages(*p);
		*p = 0;
		preempt_enable();
		balance_dirty_pages(mapping, ratelimit);
		return;
	}
	preempt_enable();
}
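
A hedged sketch of a caller that dirties pages in batches, as the comment above describes; my_dirty_batch() and its parameters are invented. Each batch is accounted through balance_dirty_pages_ratelimited_nr(), which only drops into the expensive balance_dirty_pages() path once the per-CPU counter crosses the current ratelimit.

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

static void my_dirty_batch(struct address_space *mapping,
			   struct page **pages, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		set_page_dirty(pages[i]);

	/* Account all nr pages in a single ratelimited call. */
	balance_dirty_pages_ratelimited_nr(mapping, nr);
}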
Example #26
int vmw_fallback_wait(struct vmw_private *dev_priv,
		      bool lazy,
		      bool fifo_idle,
		      uint32_t seqno,
		      bool interruptible,
		      unsigned long timeout)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;

	uint32_t count = 0;
	uint32_t signal_seq;
	int ret;
	unsigned long end_jiffies = jiffies + timeout;
	bool (*wait_condition)(struct vmw_private *, uint32_t);
	DEFINE_WAIT(__wait);

	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
		&vmw_seqno_passed;

	/**
	 * Block command submission while waiting for idle.
	 */

	if (fifo_idle)
		down_read(&fifo_state->rwsem);
	signal_seq = atomic_read(&dev_priv->marker_seq);
	ret = 0;

	for (;;) {
		prepare_to_wait(&dev_priv->fence_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (wait_condition(dev_priv, seqno))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		if (lazy)
			schedule_timeout(1);
		else if ((++count & 0x0F) == 0) {
			/**
			 * FIXME: Use schedule_hr_timeout here for
			 * newer kernels and lower CPU utilization.
			 */

			__set_current_state(TASK_RUNNING);
			schedule();
			__set_current_state((interruptible) ?
					    TASK_INTERRUPTIBLE :
					    TASK_UNINTERRUPTIBLE);
		}
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fence_queue, &__wait);
	if (ret == 0 && fifo_idle) {
		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
		iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
	}
	wake_up_all(&dev_priv->fence_queue);
	if (fifo_idle)
		up_read(&fifo_state->rwsem);

	return ret;
}
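
The prepare_to_wait()/finish_wait() form of the same idiom, shown as a minimal sketch with an invented queue (demo_queue) and flag (demo_done): prepare_to_wait() queues the entry and sets the task state, and finish_wait() restores TASK_RUNNING and dequeues, so neither step has to be open-coded as in the loops above.

#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_queue);
static int demo_done;

static int wait_for_demo_done(void)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	for (;;) {
		prepare_to_wait(&demo_queue, &wait, TASK_INTERRUPTIBLE);
		if (demo_done)
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	finish_wait(&demo_queue, &wait);
	return ret;
}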
Example #27
/**
 * n_hdlc_tty_write - write a single frame of data to device
 * @tty: pointer to associated tty device instance data
 * @file: pointer to file object data
 * @data: pointer to transmit data (one frame)
 * @count: size of transmit frame in bytes
 *
 * Returns the number of bytes written (or error code).
 */
static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
			    const unsigned char *data, size_t count)
{
	struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
	int error = 0;
	DECLARE_WAITQUEUE(wait, current);
	struct n_hdlc_buf *tbuf;

	if (debuglevel >= DEBUG_LEVEL_INFO)
		printk("%s(%d) n_hdlc_tty_write() called count=%zd\n",
		       __FILE__, __LINE__, count);
		
	/* Verify pointers */
	if (!n_hdlc)
		return -EIO;

	if (n_hdlc->magic != HDLC_MAGIC)
		return -EIO;

	/* verify frame size */
	if (count > maxframe) {
		if (debuglevel & DEBUG_LEVEL_INFO)
			printk(KERN_WARNING
				"n_hdlc_tty_write: truncating user packet "
				"from %lu to %d\n", (unsigned long) count,
				maxframe);
		count = maxframe;
	}
	
	add_wait_queue(&tty->write_wait, &wait);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
	
		tbuf = n_hdlc_buf_get(&n_hdlc->tx_free_buf_list);
		if (tbuf)
			break;

		if (file->f_flags & O_NONBLOCK) {
			error = -EAGAIN;
			break;
		}
		schedule();
			
		n_hdlc = tty2n_hdlc (tty);
		if (!n_hdlc || n_hdlc->magic != HDLC_MAGIC || 
		    tty != n_hdlc->tty) {
			printk("n_hdlc_tty_write: %p invalid after wait!\n", n_hdlc);
			error = -EIO;
			break;
		}
			
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&tty->write_wait, &wait);

	if (!error) {		
		/* Retrieve the user's buffer */
		memcpy(tbuf->buf, data, count);

		/* Send the data */
		tbuf->count = error = count;
		n_hdlc_buf_put(&n_hdlc->tx_buf_list,tbuf);
		n_hdlc_send_frames(n_hdlc,tty);
	}

	return error;
	
}	/* end of n_hdlc_tty_write() */
Example #28
int do_select(int n, fd_set_bits *fds, long *timeout)
{
	struct poll_wqueues table;
	poll_table *wait;
	int retval, i;
	long __timeout = *timeout;

 	spin_lock(&current->files->file_lock);
	retval = max_select_fd(n, fds);
	spin_unlock(&current->files->file_lock);

	if (retval < 0)
		return retval;
	n = retval;

	poll_initwait(&table);
	wait = &table.pt;
	if (!__timeout)
		wait = NULL;
	retval = 0;
	for (;;) {
		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;

		set_current_state(TASK_INTERRUPTIBLE);

		inp = fds->in; outp = fds->out; exp = fds->ex;
		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
			unsigned long in, out, ex, all_bits, bit = 1, mask, j;
			unsigned long res_in = 0, res_out = 0, res_ex = 0;
			struct file_operations *f_op = NULL;
			struct file *file = NULL;

			in = *inp++; out = *outp++; ex = *exp++;
			all_bits = in | out | ex;
			if (all_bits == 0) {
				i += __NFDBITS;
				continue;
			}

			for (j = 0; j < __NFDBITS; ++j, ++i, bit <<= 1) {
				if (i >= n)
					break;
				if (!(bit & all_bits))
					continue;
				file = fget(i);
				if (file) {
					f_op = file->f_op;
					mask = DEFAULT_POLLMASK;
					if (f_op && f_op->poll)
						mask = (*f_op->poll)(file, retval ? NULL : wait);
					fput(file);
					if ((mask & POLLIN_SET) && (in & bit)) {
						res_in |= bit;
						retval++;
					}
					if ((mask & POLLOUT_SET) && (out & bit)) {
						res_out |= bit;
						retval++;
					}
					if ((mask & POLLEX_SET) && (ex & bit)) {
						res_ex |= bit;
						retval++;
					}
				}
				cond_resched();
			}
			if (res_in)
				*rinp = res_in;
			if (res_out)
				*routp = res_out;
			if (res_ex)
				*rexp = res_ex;
		}
		wait = NULL;
		if (retval || !__timeout || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}
		__timeout = schedule_timeout(__timeout);
	}
	__set_current_state(TASK_RUNNING);

	poll_freewait(&table);

	/*
	 * Update the caller's timeout.
	 */
	*timeout = __timeout;
	return retval;
}
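
The other side of the f_op->poll call made above is the driver's poll method, sketched here with invented names (demo_poll_queue, demo_ready): it registers the caller on its wait queue with poll_wait() and reports readiness bits. A producer would set demo_ready and call wake_up_interruptible(&demo_poll_queue).

#include <linux/poll.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_poll_queue);
static int demo_ready;

static unsigned int demo_poll(struct file *file, poll_table *wait)
{
	unsigned int mask = 0;

	poll_wait(file, &demo_poll_queue, wait);
	if (demo_ready)
		mask |= POLLIN | POLLRDNORM;
	return mask;
}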
Example #29
/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: memory node number.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give -1.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data,
					   int node,
					   const char namefmt[],
					   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	create.node = node;
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(create.result, cpu_all_mask);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create_on_node);
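
/*
 * A minimal usage sketch for the API documented above; demo_thread_fn,
 * demo_task, demo_start and demo_stop are invented names.  The thread
 * is created stopped, so the caller wakes it explicitly, and it later
 * observes kthread_stop() via kthread_should_stop().
 */
static int demo_thread_fn(void *data)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static struct task_struct *demo_task;

static int demo_start(void)
{
	demo_task = kthread_create_on_node(demo_thread_fn, NULL, -1,
					   "demo/%d", 0);
	if (IS_ERR(demo_task))
		return PTR_ERR(demo_task);
	wake_up_process(demo_task);
	return 0;
}

static void demo_stop(void)
{
	kthread_stop(demo_task);
}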

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	do_set_cpus_allowed(p, cpumask_of(cpu));
	p->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);
	get_task_struct(k);

	kthread = to_kthread(k);
	barrier(); /* it might have exited */
	if (k->vfork_done != NULL) {
		kthread->should_stop = 1;
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;

	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);

	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_HIGH_MEMORY]);

	current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	worker->task = NULL;
}
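
A hedged usage sketch for the kthread_worker API initialised above, using the helper names of this kernel generation (init_kthread_worker(), init_kthread_work(), queue_kthread_work(), flush_kthread_work(), kthread_worker_fn(); later kernels renamed these to kthread_init_worker() and friends). All "demo_*" names are invented; the worker itself is just a kthread running kthread_worker_fn().

#include <linux/err.h>
#include <linux/kthread.h>

static struct kthread_worker demo_worker;
static struct kthread_work demo_work;
static struct task_struct *demo_worker_task;

static void demo_work_fn(struct kthread_work *work)
{
	pr_info("demo work executed\n");
}

static int demo_worker_start(void)
{
	init_kthread_worker(&demo_worker);
	init_kthread_work(&demo_work, demo_work_fn);

	demo_worker_task = kthread_run(kthread_worker_fn, &demo_worker,
				       "demo_worker");
	if (IS_ERR(demo_worker_task))
		return PTR_ERR(demo_worker_task);

	queue_kthread_work(&demo_worker, &demo_work);
	flush_kthread_work(&demo_work);

	kthread_stop(demo_worker_task);
	return 0;
}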
Example #30
static int ntb_perf_thread(void *data)
{
	struct pthr_ctx *pctx = data;
	struct perf_ctx *perf = pctx->perf;
	struct pci_dev *pdev = perf->ntb->pdev;
	struct perf_mw *mw = &perf->mw;
	char __iomem *dst;
	u64 win_size, buf_size, total;
	void *src;
	int rc, node, i;
	struct dma_chan *dma_chan = NULL;

	pr_debug("kthread %s starting...\n", current->comm);

	node = dev_to_node(&pdev->dev);

	if (use_dma && !pctx->dma_chan) {
		dma_cap_mask_t dma_mask;

		dma_cap_zero(dma_mask);
		dma_cap_set(DMA_MEMCPY, dma_mask);
		dma_chan = dma_request_channel(dma_mask, perf_dma_filter_fn,
					       (void *)(unsigned long)node);
		if (!dma_chan) {
			pr_warn("%s: cannot acquire DMA channel, quitting\n",
				current->comm);
			return -ENODEV;
		}
		pctx->dma_chan = dma_chan;
	}

	for (i = 0; i < MAX_SRCS; i++) {
		pctx->srcs[i] = kmalloc_node(MAX_TEST_SIZE, GFP_KERNEL, node);
		if (!pctx->srcs[i]) {
			rc = -ENOMEM;
			goto err;
		}
	}

	win_size = mw->phys_size;
	buf_size = 1ULL << seg_order;
	total = 1ULL << run_order;

	if (buf_size > MAX_TEST_SIZE)
		buf_size = MAX_TEST_SIZE;

	dst = (char __iomem *)mw->vbase;

	atomic_inc(&perf->tsync);
	while (atomic_read(&perf->tsync) != perf->perf_threads)
		schedule();

	src = pctx->srcs[pctx->src_idx];
	pctx->src_idx = (pctx->src_idx + 1) & (MAX_SRCS - 1);

	rc = perf_move_data(pctx, dst, src, buf_size, win_size, total);

	atomic_dec(&perf->tsync);

	if (rc < 0) {
		pr_err("%s: failed\n", current->comm);
		rc = -ENXIO;
		goto err;
	}

	for (i = 0; i < MAX_SRCS; i++) {
		kfree(pctx->srcs[i]);
		pctx->srcs[i] = NULL;
	}

	atomic_inc(&perf->tdone);
	wake_up(pctx->wq);
	rc = 0;
	goto done;

err:
	for (i = 0; i < MAX_SRCS; i++) {
		kfree(pctx->srcs[i]);
		pctx->srcs[i] = NULL;
	}

	if (dma_chan) {
		dma_release_channel(dma_chan);
		pctx->dma_chan = NULL;
	}

done:
	/* Wait until we are told to stop */
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	return rc;
}