Example #1
static inline int ls_yyy_spin_unlock(ls_spinlock_t *p)
{
    barrier();
    *p = LS_LOCK_AVAIL;
    return 0;
}
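The preceding barrier() is a compiler-only fence: it keeps critical-section accesses from being reordered past the release store to *p by the compiler, but emits no CPU memory-barrier instruction. A minimal sketch of the usual GCC-style definition (the common Linux form; the exact definition lives in the tree's compiler headers):

#define barrier() __asm__ __volatile__("" : : : "memory")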
Example #2
/**
 * kthread_create_ve - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run(), kthread_create_on_cpu().
 *
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create_ve(struct ve_struct *ve,
				   int (*threadfn)(void *data),
				   void *data,
				   const char namefmt[],
				   ...)
{
	struct kthread_create_info create;
	struct ve_struct *old_ve;

	old_ve = set_exec_env(ve);

	create.threadfn = threadfn;
	create.data = data;
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(create.result, cpu_all_mask);
	}

	set_exec_env(old_ve);

	return create.result;
}
EXPORT_SYMBOL(kthread_create_ve);

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);
	get_task_struct(k);

	kthread = to_kthread(k);
	barrier(); /* it might have exited */
	if (k->vfork_done != NULL) {
		kthread->should_stop = 1;
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;

	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);

	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *data)
{
	struct task_struct *tsk = current;
	struct kthreadd_create_info *kcreate;
	struct kthread self;
	int rc;

	self.should_stop = 0;

	kcreate = (struct kthreadd_create_info *) data;

	if (kcreate) {
		daemonize("kthreadd/%d", get_exec_env()->veid);
		kcreate->result = current;
		set_fs(KERNEL_DS);
		init_completion(&self.exited);
		current->vfork_done = &self.exited;
	} else
		set_task_comm(tsk, "kthreadd");

	/* Setup a clean context for our children to inherit. */
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_HIGH_MEMORY]);

	current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;

	if (kcreate)
		complete(&kcreate->done);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list)) {
			if (self.should_stop)
				break;
			else
				schedule();
		}
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	do {
		clear_thread_flag(TIF_SIGPENDING);
		rc = sys_wait4(-1, NULL, __WALL, NULL);
	} while (rc != -ECHILD);

	do_exit(0);
}

int kthreadd_create(void)
{
	struct kthreadd_create_info create;
	int ret;
	struct ve_struct *ve = get_exec_env();

	BUG_ON(ve->_kthreadd_task);

	INIT_LIST_HEAD(&ve->_kthread_create_list);
	init_completion(&create.done);
	ret = kernel_thread(kthreadd, (void *) &create, CLONE_FS);
	if (ret < 0) {
		return ret;
	}
	wait_for_completion(&create.done);
	ve->_kthreadd_task = create.result;
	return 0;
}
EXPORT_SYMBOL(kthreadd_create);

void kthreadd_stop(struct ve_struct *ve)
{
	struct kthread *kthread;
	int ret;
	struct task_struct *k;

	if (!ve->_kthreadd_task)
		return;

	k = ve->_kthreadd_task;
	trace_sched_kthread_stop(k);
	get_task_struct(k);

	BUG_ON(!k->vfork_done);

	kthread = container_of(k->vfork_done, struct kthread, exited);
	kthread->should_stop = 1;
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = k->exit_code;

	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);
}
EXPORT_SYMBOL(kthreadd_stop);
Example #3
/**
 * xuartps_console_wait_tx - Wait for the TX FIFO to drain
 * @port: Handle to the uart port structure
 */
static void xuartps_console_wait_tx(struct uart_port *port)
{
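	/* Busy-wait until the TX FIFO is empty; barrier() keeps the compiler
	 * from caching the status read or deleting the empty loop body. */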
	while ((xuartps_readl(XUARTPS_SR_OFFSET) & XUARTPS_SR_TXEMPTY)
				!= XUARTPS_SR_TXEMPTY)
		barrier();
}
Example #4
/*
 * Initialize the Linux inode, set up the operation vectors and
 * unlock the inode.
 *
 * When reading existing inodes from disk this is called directly
 * from xfs_iget, when creating a new inode it is called from
 * xfs_ialloc after setting up the inode.
 *
 * We are always called with an uninitialised linux inode here.
 * We need to initialise the necessary fields and take a reference
 * on it.
 */
void
xfs_setup_inode(
	struct xfs_inode	*ip)
{
	struct inode		*inode = &ip->i_vnode;

	inode->i_ino = ip->i_ino;
	inode->i_state = I_NEW;
	inode_add_to_lists(ip->i_mount->m_super, inode);

	inode->i_mode	= ip->i_d.di_mode;
	inode->i_nlink	= ip->i_d.di_nlink;
	inode->i_uid	= ip->i_d.di_uid;
	inode->i_gid	= ip->i_d.di_gid;

	switch (inode->i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		inode->i_rdev =
			MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
			      sysv_minor(ip->i_df.if_u2.if_rdev));
		break;
	default:
		inode->i_rdev = 0;
		break;
	}

	inode->i_generation = ip->i_d.di_gen;
	i_size_write(inode, ip->i_d.di_size);
	inode->i_atime.tv_sec	= ip->i_d.di_atime.t_sec;
	inode->i_atime.tv_nsec	= ip->i_d.di_atime.t_nsec;
	inode->i_mtime.tv_sec	= ip->i_d.di_mtime.t_sec;
	inode->i_mtime.tv_nsec	= ip->i_d.di_mtime.t_nsec;
	inode->i_ctime.tv_sec	= ip->i_d.di_ctime.t_sec;
	inode->i_ctime.tv_nsec	= ip->i_d.di_ctime.t_nsec;
	xfs_diflags_to_iflags(inode, ip);

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &xfs_inode_operations;
		inode->i_fop = &xfs_file_operations;
		inode->i_mapping->a_ops = &xfs_address_space_operations;
		break;
	case S_IFDIR:
		if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb))
			inode->i_op = &xfs_dir_ci_inode_operations;
		else
			inode->i_op = &xfs_dir_inode_operations;
		inode->i_fop = &xfs_dir_file_operations;
		break;
	case S_IFLNK:
		inode->i_op = &xfs_symlink_inode_operations;
		if (!(ip->i_df.if_flags & XFS_IFINLINE))
			inode->i_mapping->a_ops = &xfs_address_space_operations;
		break;
	default:
		inode->i_op = &xfs_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		break;
	}

	xfs_iflags_clear(ip, XFS_INEW);
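	/* compiler-only barrier: keep the XFS_INEW clear from being
	 * reordered below unlock_new_inode() */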
	barrier();

	unlock_new_inode(inode);
}
Example #5
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
	add_preempt_count(SOFTIRQ_OFFSET);
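	/* keep the compiler from hoisting the protected region above the
	 * preempt-count bump */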
	barrier();
}
Example #6
File: i2c-omap.c Project: 020gzh/linux
/*
 * Low level master read/write transaction.
 */
static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
			     struct i2c_msg *msg, int stop)
{
	struct omap_i2c_dev *omap = i2c_get_adapdata(adap);
	unsigned long timeout;
	u16 w;

	dev_dbg(omap->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n",
		msg->addr, msg->len, msg->flags, stop);

	if (msg->len == 0)
		return -EINVAL;

	omap->receiver = !!(msg->flags & I2C_M_RD);
	omap_i2c_resize_fifo(omap, msg->len, omap->receiver);

	omap_i2c_write_reg(omap, OMAP_I2C_SA_REG, msg->addr);

	/* REVISIT: Could the STB bit of I2C_CON be used with probing? */
	omap->buf = msg->buf;
	omap->buf_len = msg->len;

	/* make sure writes to omap->buf_len are ordered */
	barrier();

	omap_i2c_write_reg(omap, OMAP_I2C_CNT_REG, omap->buf_len);

	/* Clear the FIFO Buffers */
	w = omap_i2c_read_reg(omap, OMAP_I2C_BUF_REG);
	w |= OMAP_I2C_BUF_RXFIF_CLR | OMAP_I2C_BUF_TXFIF_CLR;
	omap_i2c_write_reg(omap, OMAP_I2C_BUF_REG, w);

	reinit_completion(&omap->cmd_complete);
	omap->cmd_err = 0;

	w = OMAP_I2C_CON_EN | OMAP_I2C_CON_MST | OMAP_I2C_CON_STT;

	/* High speed configuration */
	if (omap->speed > 400)
		w |= OMAP_I2C_CON_OPMODE_HS;

	if (msg->flags & I2C_M_STOP)
		stop = 1;
	if (msg->flags & I2C_M_TEN)
		w |= OMAP_I2C_CON_XA;
	if (!(msg->flags & I2C_M_RD))
		w |= OMAP_I2C_CON_TRX;

	if (!omap->b_hw && stop)
		w |= OMAP_I2C_CON_STP;
	/*
	 * NOTE: The STAT_BB bit could become 1 here if another master
	 * occupies the bus. The IP successfully completes the transfer
	 * once the bus is free again (BB resets to 0).
	 */
	omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, w);

	/*
	 * Don't write stt and stp together on some hardware.
	 */
	if (omap->b_hw && stop) {
		unsigned long delay = jiffies + OMAP_I2C_TIMEOUT;
		u16 con = omap_i2c_read_reg(omap, OMAP_I2C_CON_REG);
		while (con & OMAP_I2C_CON_STT) {
			con = omap_i2c_read_reg(omap, OMAP_I2C_CON_REG);

			/* Let the user know if i2c is in a bad state */
			if (time_after(jiffies, delay)) {
				dev_err(omap->dev, "controller timed out "
				"waiting for start condition to finish\n");
				return -ETIMEDOUT;
			}
			cpu_relax();
		}

		w |= OMAP_I2C_CON_STP;
		w &= ~OMAP_I2C_CON_STT;
		omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, w);
	}

	/*
	 * REVISIT: We should abort the transfer on signals, but the bus goes
	 * into arbitration and we're currently unable to recover from it.
	 */
	timeout = wait_for_completion_timeout(&omap->cmd_complete,
						OMAP_I2C_TIMEOUT);
	if (timeout == 0) {
		dev_err(omap->dev, "controller timed out\n");
		omap_i2c_reset(omap);
		__omap_i2c_init(omap);
		return -ETIMEDOUT;
	}

	if (likely(!omap->cmd_err))
		return 0;

	/* We have an error */
	if (omap->cmd_err & (OMAP_I2C_STAT_ROVR | OMAP_I2C_STAT_XUDF)) {
		omap_i2c_reset(omap);
		__omap_i2c_init(omap);
		return -EIO;
	}

	if (omap->cmd_err & OMAP_I2C_STAT_AL)
		return -EAGAIN;

	if (omap->cmd_err & OMAP_I2C_STAT_NACK) {
		if (msg->flags & I2C_M_IGNORE_NAK)
			return 0;

		w = omap_i2c_read_reg(omap, OMAP_I2C_CON_REG);
		w |= OMAP_I2C_CON_STP;
		omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, w);
		return -EREMOTEIO;
	}
	return -EIO;
}
Example #7
static int clip_start_xmit(struct sk_buff *skb,struct net_device *dev)
{
	struct clip_priv *clip_priv = PRIV(dev);
	struct atmarp_entry *entry;
	struct atm_vcc *vcc;
	int old;
	unsigned long flags;

	DPRINTK("clip_start_xmit (skb %p)\n",skb);
	if (!skb->dst) {
		printk(KERN_ERR "clip_start_xmit: skb->dst == NULL\n");
		dev_kfree_skb(skb);
		clip_priv->stats.tx_dropped++;
		return 0;
	}
	if (!skb->dst->neighbour) {
#if 0
		skb->dst->neighbour = clip_find_neighbour(skb->dst,1);
		if (!skb->dst->neighbour) {
			dev_kfree_skb(skb); /* lost that one */
			clip_priv->stats.tx_dropped++;
			return 0;
		}
#endif
		printk(KERN_ERR "clip_start_xmit: NO NEIGHBOUR !\n");
		dev_kfree_skb(skb);
		clip_priv->stats.tx_dropped++;
		return 0;
	}
	entry = NEIGH2ENTRY(skb->dst->neighbour);
	if (!entry->vccs) {
		if (time_after(jiffies, entry->expires)) {
			/* should be resolved */
			entry->expires = jiffies+ATMARP_RETRY_DELAY*HZ;
			to_atmarpd(act_need,PRIV(dev)->number,entry->ip);
		}
		if (entry->neigh->arp_queue.qlen < ATMARP_MAX_UNRES_PACKETS)
			skb_queue_tail(&entry->neigh->arp_queue,skb);
		else {
			dev_kfree_skb(skb);
			clip_priv->stats.tx_dropped++;
		}
		return 0;
	}
	DPRINTK("neigh %p, vccs %p\n",entry,entry->vccs);
	ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc;
	DPRINTK("using neighbour %p, vcc %p\n",skb->dst->neighbour,vcc);
	if (entry->vccs->encap) {
		void *here;

		here = skb_push(skb,RFC1483LLC_LEN);
		memcpy(here,llc_oui,sizeof(llc_oui));
		((u16 *) here)[3] = skb->protocol;
	}
	atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
	ATM_SKB(skb)->atm_options = vcc->atm_options;
	entry->vccs->last_use = jiffies;
	DPRINTK("atm_skb(%p)->vcc(%p)->dev(%p)\n",skb,vcc,vcc->dev);
	old = xchg(&entry->vccs->xoff,1); /* assume XOFF ... */
	if (old) {
		printk(KERN_WARNING "clip_start_xmit: XOFF->XOFF transition\n");
		return 0;
	}
	clip_priv->stats.tx_packets++;
	clip_priv->stats.tx_bytes += skb->len;
	(void) vcc->send(vcc,skb);
	if (atm_may_send(vcc,0)) {
		entry->vccs->xoff = 0;
		return 0;
	}
	spin_lock_irqsave(&clip_priv->xoff_lock,flags);
	netif_stop_queue(dev); /* XOFF -> throttle immediately */
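	/* compiler barrier: order the queue stop against the xoff re-check
	 * below */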
	barrier();
	if (!entry->vccs->xoff)
		netif_start_queue(dev);
		/* Oh, we just raced with clip_pop. netif_start_queue should be
		   good enough, because nothing should really be asleep because
		   of the brief netif_stop_queue. If this isn't true or if it
		   changes, use netif_wake_queue instead. */
	spin_unlock_irqrestore(&clip_priv->xoff_lock,flags);
	return 0;
}
Example #8
File: kcov.c Project: oscardagrach/linux
static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and setup buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold current position and one PC.
		 * Later we allocate size * sizeof(unsigned long) memory,
		 * that must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_TRACE;
		return 0;
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode and
		 * mmapped the file. Coverage collection is disabled only at
		 * task exit or voluntarily by KCOV_DISABLE. After that it
		 * can be enabled for another task.
		 */
		unused = arg;
		if (unused != 0 || kcov->mode == KCOV_MODE_DISABLED ||
		    kcov->area == NULL)
			return -EINVAL;
		if (kcov->t != NULL)
			return -EBUSY;
		t = current;
		/* Cache in task struct for performance. */
		t->kcov_size = kcov->size;
		t->kcov_area = kcov->area;
		/* See comment in __sanitizer_cov_trace_pc(). */
		barrier();
		WRITE_ONCE(t->kcov_mode, kcov->mode);
		t->kcov = kcov;
		kcov->t = t;
		/* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_task_init(t);
		kcov->t = NULL;
		kcov_put(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}
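The KCOV_ENABLE path above is a classic publish pattern: the payload (kcov_size, kcov_area) is stored first, barrier() pins those stores, and only then is kcov_mode flipped, so the trace hook never observes an enabled mode with a stale size or area. A minimal user-space sketch of the same pattern (hypothetical names; a compiler barrier suffices here because, as in kcov, writer and reader are the same task):

#include <stdio.h>

#define barrier() __asm__ __volatile__("" : : : "memory")

static int payload;
static int ready;

static void publish(int v)
{
	payload = v;	/* store the data first */
	barrier();	/* the compiler may not sink this store below the flag */
	ready = 1;	/* then flip the flag */
}

int main(void)
{
	publish(42);
	if (ready)
		printf("payload=%d\n", payload);	/* payload is valid here */
	return 0;
}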
Example #9
/*
 * Initialize the Linux inode, set up the operation vectors and
 * unlock the inode.
 *
 * When reading existing inodes from disk this is called directly
 * from xfs_iget, when creating a new inode it is called from
 * xfs_ialloc after setting up the inode.
 *
 * We are always called with an uninitialised linux inode here.
 * We need to initialise the necessary fields and take a reference
 * on it.
 */
void
xfs_setup_inode(
	struct xfs_inode	*ip)
{
	struct inode		*inode = &ip->i_vnode;
	gfp_t			gfp_mask;

	inode->i_ino = ip->i_ino;
	inode->i_state = I_NEW;

	inode_sb_list_add(inode);
	/* make the inode look hashed for the writeback code */
	hlist_add_fake(&inode->i_hash);

	inode->i_mode	= ip->i_d.di_mode;
	set_nlink(inode, ip->i_d.di_nlink);
	inode->i_uid    = xfs_uid_to_kuid(ip->i_d.di_uid);
	inode->i_gid    = xfs_gid_to_kgid(ip->i_d.di_gid);

	switch (inode->i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		inode->i_rdev =
			MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
			      sysv_minor(ip->i_df.if_u2.if_rdev));
		break;
	default:
		inode->i_rdev = 0;
		break;
	}

	inode->i_generation = ip->i_d.di_gen;
	i_size_write(inode, ip->i_d.di_size);
	inode->i_atime.tv_sec	= ip->i_d.di_atime.t_sec;
	inode->i_atime.tv_nsec	= ip->i_d.di_atime.t_nsec;
	inode->i_mtime.tv_sec	= ip->i_d.di_mtime.t_sec;
	inode->i_mtime.tv_nsec	= ip->i_d.di_mtime.t_nsec;
	inode->i_ctime.tv_sec	= ip->i_d.di_ctime.t_sec;
	inode->i_ctime.tv_nsec	= ip->i_d.di_ctime.t_nsec;
	xfs_diflags_to_iflags(inode, ip);

	ip->d_ops = ip->i_mount->m_nondir_inode_ops;
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &xfs_inode_operations;
		inode->i_fop = &xfs_file_operations;
		inode->i_mapping->a_ops = &xfs_address_space_operations;
		break;
	case S_IFDIR:
		if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb))
			inode->i_op = &xfs_dir_ci_inode_operations;
		else
			inode->i_op = &xfs_dir_inode_operations;
		inode->i_fop = &xfs_dir_file_operations;
		ip->d_ops = ip->i_mount->m_dir_inode_ops;
		break;
	case S_IFLNK:
		inode->i_op = &xfs_symlink_inode_operations;
		if (!(ip->i_df.if_flags & XFS_IFINLINE))
			inode->i_mapping->a_ops = &xfs_address_space_operations;
		break;
	default:
		inode->i_op = &xfs_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		break;
	}

	/*
	 * Ensure all page cache allocations are done from GFP_NOFS context to
	 * prevent direct reclaim recursion back into the filesystem and blowing
	 * stacks or deadlocking.
	 */
	gfp_mask = mapping_gfp_mask(inode->i_mapping);
	mapping_set_gfp_mask(inode->i_mapping, (gfp_mask & ~(__GFP_FS)));

	/*
	 * If there is no attribute fork no ACL can exist on this inode,
	 * and it can't have any file capabilities attached to it either.
	 */
	if (!XFS_IFORK_Q(ip)) {
		inode_has_no_xattr(inode);
		cache_no_acl(inode);
	}

	xfs_iflags_clear(ip, XFS_INEW);
	barrier();

	unlock_new_inode(inode);
}
Example #10
/*
 * VFP support code initialisation.
 */
static int __init vfp_init(void)
{
	unsigned int vfpsid;
	unsigned int cpu_arch = cpu_architecture();

	if (cpu_arch >= CPU_ARCH_ARMv6)
		on_each_cpu(vfp_enable, NULL, 1);

	/*
	 * First check that there is a VFP that we can use.
	 * The handler is already setup to just log calls, so
	 * we just need to read the VFPSID register.
	 */
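	/* The barrier()s below bracket the FPSID read so the compiler cannot
	 * move the vfp_vector updates across it: the fmrx() may bounce to the
	 * undef handler, which must see the testing entry while it runs. */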
	vfp_vector = vfp_testing_entry;
	barrier();
	vfpsid = fmrx(FPSID);
	barrier();
	vfp_vector = vfp_null_entry;

	printk(KERN_INFO "VFP support v0.3: ");
	if (VFP_arch)
		printk("not present\n");
	else if (vfpsid & FPSID_NODOUBLE) {
		printk("no double precision support\n");
	} else {
		hotcpu_notifier(vfp_hotplug, 0);

		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;  /* Extract the architecture version */
		printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
			(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
			(vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
			(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
			(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
			(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);

		vfp_vector = vfp_support_entry;

		thread_register_notifier(&vfp_notifier_block);
		vfp_pm_init();

		/*
		 * We detected VFP, and the support code is
		 * in place; report VFP support to userspace.
		 */
		elf_hwcap |= HWCAP_VFP;
#ifdef CONFIG_VFPv3
		if (VFP_arch >= 2) {
			elf_hwcap |= HWCAP_VFPv3;

			/*
			 * Check for VFPv3 D16. CPUs in this configuration
			 * only have 16 x 64bit registers.
			 */
			if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
				elf_hwcap |= HWCAP_VFPv3D16;
		}
#endif
		/*
		 * Check for the presence of the Advanced SIMD
		 * load/store instructions, integer and single
		 * precision floating point operations. Only check
		 * for NEON if the hardware has the MVFR registers.
		 */
		if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
#ifdef CONFIG_NEON
			if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
				elf_hwcap |= HWCAP_NEON;
#endif
			if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
				elf_hwcap |= HWCAP_VFPv4;
		}
	}
	return 0;
}
Example #11
int
main ()
{
  int	i;


  thds = omp_get_max_threads ();
  if (thds == 1) {
    printf ("should be run this program on multi threads.\n");
    exit (0);
  }
  omp_set_dynamic (0);


  #pragma omp parallel
  {
    int	j;

    #pragma omp for schedule(static,1) lastprivate (prvt)
    for (i=0; i<thds; i++) {
      for (j=0; j<ARRAYSIZ; j++) {
	prvt[j] = i+j;
      }
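      /* barrier(thds) is presumably the testsuite's own helper that
       * synchronises thds threads, not an OpenMP barrier */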
      barrier (thds);
      for (j=0; j<ARRAYSIZ; j++) {
	if (prvt[j] != i+j) {
          #pragma omp critical
	  errors += 1;
	}
      }
      if (sizeof(prvt) != sizeof(int)*ARRAYSIZ) {
        #pragma omp critical
	errors += 1;
      }
      if (i==0) {
	waittime (1);
      }
      for (j=0; j<ARRAYSIZ; j++) {
	prvt[j] = i+j;
      }
    }

    for (j=0; j<ARRAYSIZ; j++) {
      if (prvt[j] != (thds-1)+j) {
        #pragma omp critical
	errors += 1;
      }
    }
  }


  #pragma omp parallel
  func (thds);


  func (1);


  if (errors == 0) {
    printf ("lastprivate 017 : SUCCESS\n");
    return 0;
  } else {
    printf ("lastprivate 017 : FAILED\n");
    return 1;
  }
}
Example #12
/*
 * Package up a bounce condition.
 */
void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
{
	u32 fpscr, orig_fpscr, fpsid, exceptions;

	pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);

	/*
	 * At this point, FPEXC can have the following configuration:
	 *
	 *  EX DEX IXE
	 *  0   1   x   - synchronous exception
	 *  1   x   0   - asynchronous exception
	 *  1   x   1   - synchronous on VFP subarch 1 and asynchronous on later
	 *  0   0   1   - synchronous on VFP9 (non-standard subarch 1
	 *                implementation), undefined otherwise
	 *
	 * Clear various bits and enable access to the VFP so we can
	 * handle the bounce.
	 */
	fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK));

	fpsid = fmrx(FPSID);
	orig_fpscr = fpscr = fmrx(FPSCR);

	/*
	 * Check for the special VFP subarch 1 and FPSCR.IXE bit case
	 */
	if ((fpsid & FPSID_ARCH_MASK) == (1 << FPSID_ARCH_BIT)
	    && (fpscr & FPSCR_IXE)) {
		/*
		 * Synchronous exception, emulate the trigger instruction
		 */
		goto emulate;
	}

	if (fpexc & FPEXC_EX) {
#ifndef CONFIG_CPU_FEROCEON
		/*
		 * Asynchronous exception. The instruction is read from FPINST
		 * and the interrupted instruction has to be restarted.
		 */
		trigger = fmrx(FPINST);
		regs->ARM_pc -= 4;
#endif
	} else if (!(fpexc & FPEXC_DEX)) {
		/*
		 * Illegal combination of bits. It can be caused by an
		 * unallocated VFP instruction but with FPSCR.IXE set and not
		 * on VFP subarch 1.
		 */
		vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
		goto exit;
	}

	/*
	 * Modify fpscr to indicate the number of iterations remaining.
	 * If FPEXC.EX is 0, FPEXC.DEX is 1 and the FPEXC.VV bit indicates
	 * whether FPEXC.VECITR or FPSCR.LEN is used.
	 */
	if (fpexc & (FPEXC_EX | FPEXC_VV)) {
		u32 len;

		len = fpexc + (1 << FPEXC_LENGTH_BIT);

		fpscr &= ~FPSCR_LENGTH_MASK;
		fpscr |= (len & FPEXC_LENGTH_MASK) << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT);
	}

	/*
	 * Handle the first FP instruction.  We used to take note of the
	 * FPEXC bounce reason, but this appears to be unreliable.
	 * Emulate the bounced instruction instead.
	 */
	exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);

	/*
	 * If there isn't a second FP instruction, exit now. Note that
	 * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
	 */
	if (fpexc ^ (FPEXC_EX | FPEXC_FP2V))
		goto exit;

	/*
	 * The barrier() here prevents fpinst2 being read
	 * before the condition above.
	 */
	barrier();
	trigger = fmrx(FPINST2);

 emulate:
	exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
 exit:
	preempt_enable();
}
Example #13
File: islpci_mgt.c Project: 274914765/C
/*
 * Receive a management frame from the device.
 * This can be an arbitrary number of traps, and at most one response
 * frame for a previous request sent via islpci_mgt_transmit().
 */
int
islpci_mgt_receive(struct net_device *ndev)
{
    islpci_private *priv = netdev_priv(ndev);
    isl38xx_control_block *cb =
        (isl38xx_control_block *) priv->control_block;
    u32 curr_frag;

#if VERBOSE > SHOW_ERROR_MESSAGES
    DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_receive \n");
#endif

    /* Only once per interrupt, determine fragment range to
     * process.  This avoids an endless loop (i.e. lockup) if
     * frames come in faster than we can process them. */
    curr_frag = le32_to_cpu(cb->device_curr_frag[ISL38XX_CB_RX_MGMTQ]);
    barrier();

    for (; priv->index_mgmt_rx < curr_frag; priv->index_mgmt_rx++) {
        pimfor_header_t *header;
        u32 index = priv->index_mgmt_rx % ISL38XX_CB_MGMT_QSIZE;
        struct islpci_membuf *buf = &priv->mgmt_rx[index];
        u16 frag_len;
        int size;
        struct islpci_mgmtframe *frame;

        /* I have no idea (and no documentation) if flags != 0
         * is possible.  Drop the frame, reuse the buffer. */
        if (le16_to_cpu(cb->rx_data_mgmt[index].flags) != 0) {
            printk(KERN_WARNING "%s: unknown flags 0x%04x\n",
                   ndev->name,
                   le16_to_cpu(cb->rx_data_mgmt[index].flags));
            continue;
        }

        /* The device only returns the size of the header(s) here. */
        frag_len = le16_to_cpu(cb->rx_data_mgmt[index].size);

        /*
         * We appear to have no way to tell the device the
         * size of a receive buffer.  Thus, if this check
         * triggers, we likely have kernel heap corruption. */
        if (frag_len > MGMT_FRAME_SIZE) {
            printk(KERN_WARNING
                "%s: Bogus packet size of %d (%#x).\n",
                ndev->name, frag_len, frag_len);
            frag_len = MGMT_FRAME_SIZE;
        }

        /* Ensure the results of device DMA are visible to the CPU. */
        pci_dma_sync_single_for_cpu(priv->pdev, buf->pci_addr,
                        buf->size, PCI_DMA_FROMDEVICE);

        /* Perform endianness conversion for PIMFOR header in-place. */
        header = pimfor_decode_header(buf->mem, frag_len);
        if (!header) {
            printk(KERN_WARNING "%s: no PIMFOR header found\n",
                   ndev->name);
            continue;
        }

        /* The device ID from the PIMFOR packet received from
         * the MVC is always 0.  We forward a sensible device_id.
         * Not that anyone upstream would care... */
        header->device_id = priv->ndev->ifindex;

#if VERBOSE > SHOW_ERROR_MESSAGES
        DEBUG(SHOW_PIMFOR_FRAMES,
              "PIMFOR: op %i, oid 0x%08x, device %i, flags 0x%x length 0x%x \n",
              header->operation, header->oid, header->device_id,
              header->flags, header->length);

        /* display the buffer contents for debugging */
        display_buffer((char *) header, PIMFOR_HEADER_SIZE);
        display_buffer((char *) header + PIMFOR_HEADER_SIZE,
                   header->length);
#endif

        /* nobody sends these */
        if (header->flags & PIMFOR_FLAG_APPLIC_ORIGIN) {
            printk(KERN_DEBUG
                   "%s: errant PIMFOR application frame\n",
                   ndev->name);
            continue;
        }

        /* Determine frame size, skipping OID_INL_TUNNEL headers. */
        size = PIMFOR_HEADER_SIZE + header->length;
        frame = kmalloc(sizeof (struct islpci_mgmtframe) + size,
                GFP_ATOMIC);
        if (!frame) {
            printk(KERN_WARNING
                   "%s: Out of memory, cannot handle oid 0x%08x\n",
                   ndev->name, header->oid);
            continue;
        }
        frame->ndev = ndev;
        memcpy(&frame->buf, header, size);
        frame->header = (pimfor_header_t *) frame->buf;
        frame->data = frame->buf + PIMFOR_HEADER_SIZE;

#if VERBOSE > SHOW_ERROR_MESSAGES
        DEBUG(SHOW_PIMFOR_FRAMES,
              "frame: header: %p, data: %p, size: %d\n",
              frame->header, frame->data, size);
#endif

        if (header->operation == PIMFOR_OP_TRAP) {
#if VERBOSE > SHOW_ERROR_MESSAGES
            printk(KERN_DEBUG
                   "TRAP: oid 0x%x, device %i, flags 0x%x length %i\n",
                   header->oid, header->device_id, header->flags,
                   header->length);
#endif

            /* Create work to handle trap out of interrupt
             * context. */
            INIT_WORK(&frame->ws, prism54_process_trap);
            schedule_work(&frame->ws);

        } else {
            /* Signal the one waiting process that a response
             * has been received. */
            if ((frame = xchg(&priv->mgmt_received, frame)) != NULL) {
                printk(KERN_WARNING
                       "%s: mgmt response not collected\n",
                       ndev->name);
                kfree(frame);
            }
#if VERBOSE > SHOW_ERROR_MESSAGES
            DEBUG(SHOW_TRACING, "Wake up Mgmt Queue\n");
#endif
            wake_up(&priv->mgmt_wqueue);
        }

    }

    return 0;
}
Example #14
/**
 *	probe_irq_on	- begin an interrupt autodetect
 *
 *	Commence probing for an interrupt. The interrupts are scanned
 *	and a mask of potential interrupt lines is returned.
 *
 */
unsigned long probe_irq_on(void)
{
	unsigned long val, delay;
	irq_desc_t *desc;
	unsigned int i;

	down(&probe_sem);
	/*
	 * something may have generated an irq long ago and we want to
	 * flush such a longstanding irq before considering it as spurious.
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!irq_desc[i].action)
			irq_desc[i].handler->startup(i);
		spin_unlock_irq(&desc->lock);
	}

	/* Wait for longstanding interrupts to trigger. */
	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
		/* about 20ms delay */ barrier();

	/*
	 * enable any unassigned irqs
	 * (we must startup again here because if a longstanding irq
	 * happened in the previous stage, it may have masked itself)
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!desc->action) {
			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
			if (desc->handler->startup(i))
				desc->status |= IRQ_PENDING;
		}
		spin_unlock_irq(&desc->lock);
	}

	/*
	 * Wait for spurious interrupts to trigger
	 */
	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
		/* about 100ms delay */ barrier();

	/*
	 * Now filter out any obviously spurious interrupts
	 */
	val = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			/* It triggered already - consider it spurious. */
			if (!(status & IRQ_WAITING)) {
				desc->status = status & ~IRQ_AUTODETECT;
				desc->handler->shutdown(i);
			} else
				if (i < 32)
					val |= 1 << i;
		}
		spin_unlock_irq(&desc->lock);
	}

	return val;
}
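The two empty delay loops above work because jiffies is declared volatile and barrier() gives each loop a body the compiler cannot delete. A minimal user-space analogue, assumed for illustration only:

#include <stdio.h>
#include <time.h>

#define barrier() __asm__ __volatile__("" : : : "memory")

/* Spin for roughly `secs` seconds without sleeping. */
static void spin_delay(int secs)
{
	time_t end = time(NULL) + secs;
	while (time(NULL) < end)
		barrier();	/* non-empty body; loop cannot be optimised away */
}

int main(void)
{
	spin_delay(1);
	puts("done");
	return 0;
}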
Example #15
static inline void SMC_outw(struct eth_device *dev, word value, dword offset)
{
	*((volatile word*)(dev->iobase + offset)) = value;
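	/* barrier() plus a dummy volatile read: presumably forces the posted
	 * write out of the write buffer before returning */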
	barrier(); *(volatile u32*)(0xc0000000);
}
Example #16
static noinline void emc_set_clock(const struct tegra11_emc_table *next_timing,
				   const struct tegra11_emc_table *last_timing,
				   u32 clk_setting)
{
#ifndef EMULATE_CLOCK_SWITCH
	int i, dll_change, pre_wait;
	bool dyn_sref_enabled, zcal_long;

	u32 emc_cfg_reg = emc_readl(EMC_CFG);

	dyn_sref_enabled = emc_cfg_reg & EMC_CFG_DYN_SREF_ENABLE;
	dll_change = get_dll_change(next_timing, last_timing);
	zcal_long = (next_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] != 0) &&
		(last_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] == 0);

	/* FIXME: remove steps enumeration below? */

	/* 1. clear clkchange_complete interrupts */
	emc_writel(EMC_INTSTATUS_CLKCHANGE_COMPLETE, EMC_INTSTATUS);

	/* 2. disable dynamic self-refresh and preset dqs vref, then wait for
	   possible self-refresh entry/exit and/or dqs vref settled - waiting
	   before the clock change decreases worst case change stall time */
	pre_wait = 0;
	if (dyn_sref_enabled) {
		emc_cfg_reg &= ~EMC_CFG_DYN_SREF_ENABLE;
		emc_writel(emc_cfg_reg, EMC_CFG);
		pre_wait = 5;		/* 5us+ for self-refresh entry/exit */
	}

	/* 2.5 check dq/dqs vref delay */
	if (dqs_preset(next_timing, last_timing)) {
		if (pre_wait < 3)
			pre_wait = 3;	/* 3us+ for dqs vref settled */
	}
	if (pre_wait) {
		emc_timing_update();
		udelay(pre_wait);
	}

	/* 3. disable auto-cal if vref mode is switching - removed */

	/* 4. program burst shadow registers */
	for (i = 0; i < next_timing->burst_regs_num; i++) {
		if (!burst_reg_addr[i])
			continue;
		__raw_writel(next_timing->burst_regs[i], burst_reg_addr[i]);
	}
	for (i = 0; i < next_timing->emc_trimmers_num; i++) {
		__raw_writel(next_timing->emc_trimmers_0[i],
			(u32)emc0_base + emc_trimmer_offs[i]);
		__raw_writel(next_timing->emc_trimmers_1[i],
			(u32)emc1_base + emc_trimmer_offs[i]);
	}
	emc_cfg_reg &= ~EMC_CFG_UPDATE_MASK;
	emc_cfg_reg |= next_timing->emc_cfg & EMC_CFG_UPDATE_MASK;
	emc_writel(emc_cfg_reg, EMC_CFG);
	wmb();
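	/* wmb() already implies a compiler barrier; the extra barrier() here
	 * appears to be belt-and-braces before the timing-critical sequence */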
	barrier();

	/* 4.1 On ddr3 when DLL is re-started predict MRS long wait count and
	   overwrite DFS table setting */
	if ((dram_type == DRAM_TYPE_DDR3) && (dll_change == DLL_CHANGE_ON))
		overwrite_mrs_wait_cnt(next_timing, zcal_long);

	/* 5.2 disable auto-refresh to save time after clock change */
	emc_writel(EMC_REFCTRL_DISABLE_ALL(dram_dev_num), EMC_REFCTRL);

	/* 6. turn Off dll and enter self-refresh on DDR3 */
	if (dram_type == DRAM_TYPE_DDR3) {
		if (dll_change == DLL_CHANGE_OFF)
			ccfifo_writel(next_timing->emc_mode_1, EMC_EMRS);
		ccfifo_writel(DRAM_BROADCAST(dram_dev_num) |
			      EMC_SELF_REF_CMD_ENABLED, EMC_SELF_REF);
	}

	/* 7. flow control marker 2 */
	ccfifo_writel(1, EMC_STALL_THEN_EXE_AFTER_CLKCHANGE);

	/* 8. exit self-refresh on DDR3 */
	if (dram_type == DRAM_TYPE_DDR3)
		ccfifo_writel(DRAM_BROADCAST(dram_dev_num), EMC_SELF_REF);

	/* 9. set dram mode registers */
	set_dram_mode(next_timing, last_timing, dll_change);

	/* 10. issue zcal command if turning zcal On */
	if (zcal_long) {
		ccfifo_writel(EMC_ZQ_CAL_LONG_CMD_DEV0, EMC_ZQ_CAL);
		if (dram_dev_num > 1)
			ccfifo_writel(EMC_ZQ_CAL_LONG_CMD_DEV1, EMC_ZQ_CAL);
	}

	/* 10.1 dummy write to RO register to remove stall after change */
	ccfifo_writel(0, EMC_CCFIFO_STATUS);

	/* 11.5 program burst_up_down registers if emc rate is going down */
	if (next_timing->rate < last_timing->rate) {
		for (i = 0; i < next_timing->burst_up_down_regs_num; i++)
			__raw_writel(next_timing->burst_up_down_regs[i],
				burst_up_down_reg_addr[i]);
		wmb();
	}

	/* 12-14. read any MC register to ensure the programming is done
	   change EMC clock source register wait for clk change completion */
	do_clock_change(clk_setting);

	/* 14.1 re-enable auto-refresh */
	emc_writel(EMC_REFCTRL_ENABLE_ALL(dram_dev_num), EMC_REFCTRL);

	/* 14.2 program burst_up_down registers if emc rate is going up */
	if (next_timing->rate > last_timing->rate) {
		for (i = 0; i < next_timing->burst_up_down_regs_num; i++)
			__raw_writel(next_timing->burst_up_down_regs[i],
				burst_up_down_reg_addr[i]);
		wmb();
	}

	/* 15. restore auto-cal - removed */

	/* 16. restore dynamic self-refresh */
	if (next_timing->emc_cfg & EMC_CFG_DYN_SREF_ENABLE) {
		emc_cfg_reg |= EMC_CFG_DYN_SREF_ENABLE;
		emc_writel(emc_cfg_reg, EMC_CFG);
	}

	/* 17. set zcal wait count */
	if (zcal_long)
		emc_writel(next_timing->emc_zcal_cnt_long, EMC_ZCAL_WAIT_CNT);

	/* 18. update restored timing */
	udelay(2);
	emc_timing_update();
#else
	/* FIXME: implement */
	pr_info("tegra11_emc: Configuring EMC rate %lu (setting: 0x%x)\n",
		next_timing->rate, clk_setting);
#endif
}
Example #17
static irqreturn_t swim3_interrupt(int irq, void *dev_id)
{
	struct floppy_state *fs = (struct floppy_state *) dev_id;
	struct swim3 __iomem *sw = fs->swim3;
	int intr, err, n;
	int stat, resid;
	struct dbdma_regs __iomem *dr;
	struct dbdma_cmd *cp;
	unsigned long flags;
	struct request *req = fs->cur_req;

	swim3_dbg("* interrupt, state=%d\n", fs->state);

	spin_lock_irqsave(&swim3_lock, flags);
	intr = in_8(&sw->intr);
	err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
	if ((intr & ERROR_INTR) && fs->state != do_transfer)
		swim3_err("Non-transfer error interrupt: state=%d, dir=%x, intr=%x, err=%x\n",
			  fs->state, rq_data_dir(req), intr, err);
	switch (fs->state) {
	case locating:
		if (intr & SEEN_SECTOR) {
			out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
			out_8(&sw->select, RELAX);
			out_8(&sw->intr_enable, 0);
			del_timer(&fs->timeout);
			fs->timeout_pending = 0;
			if (sw->ctrack == 0xff) {
				swim3_err("%s", "Seen sector but cyl=ff?\n");
				fs->cur_cyl = -1;
				if (fs->retries > 5) {
					swim3_end_request(fs, -EIO, 0);
					fs->state = idle;
					start_request(fs);
				} else {
					fs->state = jogging;
					act(fs);
				}
				break;
			}
			fs->cur_cyl = sw->ctrack;
			fs->cur_sector = sw->csect;
			if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
				swim3_err("Expected cyl %d, got %d\n",
					  fs->expect_cyl, fs->cur_cyl);
			fs->state = do_transfer;
			act(fs);
		}
		break;
	case seeking:
	case jogging:
		if (sw->nseek == 0) {
			out_8(&sw->control_bic, DO_SEEK);
			out_8(&sw->select, RELAX);
			out_8(&sw->intr_enable, 0);
			del_timer(&fs->timeout);
			fs->timeout_pending = 0;
			if (fs->state == seeking)
				++fs->retries;
			fs->state = settling;
			act(fs);
		}
		break;
	case settling:
		out_8(&sw->intr_enable, 0);
		del_timer(&fs->timeout);
		fs->timeout_pending = 0;
		act(fs);
		break;
	case do_transfer:
		if ((intr & (ERROR_INTR | TRANSFER_DONE)) == 0)
			break;
		out_8(&sw->intr_enable, 0);
		out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
		out_8(&sw->select, RELAX);
		del_timer(&fs->timeout);
		fs->timeout_pending = 0;
		dr = fs->dma;
		cp = fs->dma_cmd;
		if (rq_data_dir(req) == WRITE)
			++cp;
		/*
		 * Check that the main data transfer has finished.
		 * On writing, the swim3 sometimes doesn't use
		 * up all the bytes of the postamble, so we can still
		 * see DMA active here.  That doesn't matter as long
		 * as all the sector data has been transferred.
		 */
		if ((intr & ERROR_INTR) == 0 && cp->xfer_status == 0) {
			/* wait a little while for DMA to complete */
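			/* cp is DBDMA descriptor memory updated by the
			 * controller; barrier() forces xfer_status to be
			 * re-read on each iteration */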
			for (n = 0; n < 100; ++n) {
				if (cp->xfer_status != 0)
					break;
				udelay(1);
				barrier();
			}
		}
		/* turn off DMA */
		out_le32(&dr->control, (RUN | PAUSE) << 16);
		stat = ld_le16(&cp->xfer_status);
		resid = ld_le16(&cp->res_count);
		if (intr & ERROR_INTR) {
			n = fs->scount - 1 - resid / 512;
			if (n > 0) {
				blk_update_request(req, 0, n << 9);
				fs->req_sector += n;
			}
			if (fs->retries < 5) {
				++fs->retries;
				act(fs);
			} else {
				swim3_err("Error %sing block %ld (err=%x)\n",
				       rq_data_dir(req) == WRITE? "writ": "read",
				       (long)blk_rq_pos(req), err);
				swim3_end_request(fs, -EIO, 0);
				fs->state = idle;
			}
		} else {
			if ((stat & ACTIVE) == 0 || resid != 0) {
				/* must have been an error */
				swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid);
				swim3_err("  state=%d, dir=%x, intr=%x, err=%x\n",
					  fs->state, rq_data_dir(req), intr, err);
				swim3_end_request(fs, -EIO, 0);
				fs->state = idle;
				start_request(fs);
				break;
			}
			fs->retries = 0;
			if (swim3_end_request(fs, 0, fs->scount << 9)) {
				fs->req_sector += fs->scount;
				if (fs->req_sector > fs->secpertrack) {
					fs->req_sector -= fs->secpertrack;
					if (++fs->head > 1) {
						fs->head = 0;
						++fs->req_cyl;
					}
				}
				act(fs);
			} else
				fs->state = idle;
		}
		if (fs->state == idle)
			start_request(fs);
		break;
	default:
		swim3_err("Don't know what to do in state %d\n", fs->state);
	}
	spin_unlock_irqrestore(&swim3_lock, flags);
	return IRQ_HANDLED;
}
Example #18
File: imx.c Project: ivucica/linux
static void
imx_set_termios(struct uart_port *port, struct ktermios *termios,
		   struct ktermios *old)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned long flags;
	unsigned int ucr2, old_ucr1, old_txrxen, baud, quot;
	unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;

	/*
	 * If we don't support modem control lines, don't allow
	 * these to be set.
	 */
	if (0) {
		termios->c_cflag &= ~(HUPCL | CRTSCTS | CMSPAR);
		termios->c_cflag |= CLOCAL;
	}

	/*
	 * We only support CS7 and CS8.
	 */
	while ((termios->c_cflag & CSIZE) != CS7 &&
	       (termios->c_cflag & CSIZE) != CS8) {
		termios->c_cflag &= ~CSIZE;
		termios->c_cflag |= old_csize;
		old_csize = CS8;
	}

	if ((termios->c_cflag & CSIZE) == CS8)
		ucr2 = UCR2_WS | UCR2_SRST | UCR2_IRTS;
	else
		ucr2 = UCR2_SRST | UCR2_IRTS;

	if (termios->c_cflag & CRTSCTS) {
		if( sport->have_rtscts ) {
			ucr2 &= ~UCR2_IRTS;
			ucr2 |= UCR2_CTSC;
		} else {
			termios->c_cflag &= ~CRTSCTS;
		}
	}

	if (termios->c_cflag & CSTOPB)
		ucr2 |= UCR2_STPB;
	if (termios->c_cflag & PARENB) {
		ucr2 |= UCR2_PREN;
		if (termios->c_cflag & PARODD)
			ucr2 |= UCR2_PROE;
	}

	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
	quot = uart_get_divisor(port, baud);

	spin_lock_irqsave(&sport->port.lock, flags);

	sport->port.read_status_mask = 0;
	if (termios->c_iflag & INPCK)
		sport->port.read_status_mask |= (URXD_FRMERR | URXD_PRERR);
	if (termios->c_iflag & (BRKINT | PARMRK))
		sport->port.read_status_mask |= URXD_BRK;

	/*
	 * Characters to ignore
	 */
	sport->port.ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		sport->port.ignore_status_mask |= URXD_PRERR;
	if (termios->c_iflag & IGNBRK) {
		sport->port.ignore_status_mask |= URXD_BRK;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			sport->port.ignore_status_mask |= URXD_OVRRUN;
	}

	del_timer_sync(&sport->timer);

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	/*
	 * disable interrupts and drain transmitter
	 */
	old_ucr1 = UCR1((u32)sport->port.membase);
	UCR1((u32)sport->port.membase) &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN);

	while ( !(USR2((u32)sport->port.membase) & USR2_TXDC))
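		/* spin until TXDC (transmit complete); barrier() keeps the
		 * compiler from caching the status read */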
		barrier();

	/* then, disable everything */
	old_txrxen = UCR2((u32)sport->port.membase) & ( UCR2_TXEN | UCR2_RXEN );
	UCR2((u32)sport->port.membase) &= ~( UCR2_TXEN | UCR2_RXEN);

	/* set the parity, stop bits and data size */
	UCR2((u32)sport->port.membase) = ucr2;

	/* set the baud rate. We assume uartclk = 16 MHz
	 *
	 * baud * 16   UBIR + 1
	 * --------- = --------
	 *  uartclk    UBMR + 1
	 */
	UBIR((u32)sport->port.membase) = (baud / 100) - 1;
	UBMR((u32)sport->port.membase) = 10000 - 1;

	UCR1((u32)sport->port.membase) = old_ucr1;
	UCR2((u32)sport->port.membase) |= old_txrxen;

	if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
		imx_enable_ms(&sport->port);

	spin_unlock_irqrestore(&sport->port.lock, flags);
}
Example #19
static void
ip3106_set_termios(struct uart_port *port, struct termios *termios,
		   struct termios *old)
{
	struct ip3106_port *sport = (struct ip3106_port *)port;
	unsigned long flags;
	unsigned int lcr_fcr, old_ien, baud, quot;
	unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;

	/*
	 * We only support CS7 and CS8.
	 */
	while ((termios->c_cflag & CSIZE) != CS7 &&
	       (termios->c_cflag & CSIZE) != CS8) {
		termios->c_cflag &= ~CSIZE;
		termios->c_cflag |= old_csize;
		old_csize = CS8;
	}

	if ((termios->c_cflag & CSIZE) == CS8)
		lcr_fcr = IP3106_UART_LCR_8BIT;
	else
		lcr_fcr = 0;

	if (termios->c_cflag & CSTOPB)
		lcr_fcr |= IP3106_UART_LCR_2STOPB;
	if (termios->c_cflag & PARENB) {
		lcr_fcr |= IP3106_UART_LCR_PAREN;
		if (!(termios->c_cflag & PARODD))
			lcr_fcr |= IP3106_UART_LCR_PAREVN;
	}

	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
	quot = uart_get_divisor(port, baud);

	spin_lock_irqsave(&sport->port.lock, flags);

#if	0	/* REVISIT */
	sport->port.read_status_mask &= UTSR0_TO_SM(UTSR0_TFS);
	sport->port.read_status_mask |= UTSR1_TO_SM(UTSR1_ROR);
	if (termios->c_iflag & INPCK)
		sport->port.read_status_mask |=
				UTSR1_TO_SM(UTSR1_FRE | UTSR1_PRE);
	if (termios->c_iflag & (BRKINT | PARMRK))
		sport->port.read_status_mask |=
				UTSR0_TO_SM(UTSR0_RBB | UTSR0_REB);

	/*
	 * Characters to ignore
	 */
	sport->port.ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		sport->port.ignore_status_mask |=
				UTSR1_TO_SM(UTSR1_FRE | UTSR1_PRE);
	if (termios->c_iflag & IGNBRK) {
		sport->port.ignore_status_mask |=
				UTSR0_TO_SM(UTSR0_RBB | UTSR0_REB);
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			sport->port.ignore_status_mask |=
				UTSR1_TO_SM(UTSR1_ROR);
	}
#endif

	del_timer_sync(&sport->timer);

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	/*
	 * disable interrupts and drain transmitter
	 */
	old_ien = serial_in(sport, IP3106_IEN);
	serial_out(sport, IP3106_IEN, old_ien & ~(IP3106_UART_INT_ALLTX |
					IP3106_UART_INT_ALLRX));

	while (serial_in(sport, IP3106_FIFO) & IP3106_UART_FIFO_TXFIFO_STA)
		barrier();

	/* then, disable everything */
	serial_out(sport, IP3106_IEN, 0);

	/* Reset the Rx and Tx FIFOs too */
	lcr_fcr |= IP3106_UART_LCR_TX_RST;
	lcr_fcr |= IP3106_UART_LCR_RX_RST;

	/* set the parity, stop bits and data size */
	serial_out(sport, IP3106_LCR, lcr_fcr);

	/* set the baud rate */
	quot -= 1;
	serial_out(sport, IP3106_BAUD, quot);

	serial_out(sport, IP3106_ICLR, -1);

	serial_out(sport, IP3106_IEN, old_ien);

	if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
		ip3106_enable_ms(&sport->port);

	spin_unlock_irqrestore(&sport->port.lock, flags);
}
Example #20
static void
common_shutdown_1(void *generic_ptr)
{
	struct halt_info *how = (struct halt_info *)generic_ptr;
	struct percpu_struct *cpup;
	unsigned long *pflags, flags;
	int cpuid = smp_processor_id();

	/* No point in taking interrupts anymore. */
	local_irq_disable();

	cpup = (struct percpu_struct *)
			((unsigned long)hwrpb + hwrpb->processor_offset
			 + hwrpb->processor_size * cpuid);
	pflags = &cpup->flags;
	flags = *pflags;

	/* Clear reason to "default"; clear "bootstrap in progress". */
	flags &= ~0x00ff0001UL;

#ifdef CONFIG_SMP
	/* Secondaries halt here. */
	if (cpuid != boot_cpuid) {
		flags |= 0x00040000UL; /* "remain halted" */
		*pflags = flags;
		set_cpu_present(cpuid, false);
		set_cpu_possible(cpuid, false);
		halt();
	}
#endif

	if (how->mode == LINUX_REBOOT_CMD_RESTART) {
		if (!how->restart_cmd) {
			flags |= 0x00020000UL; /* "cold bootstrap" */
		} else {
			/* For SRM, we could probably set environment
			   variables to get this to work.  We'd have to
			   delay this until after srm_paging_stop unless
			   we ever got srm_fixup working.

			   At the moment, SRM will use the last boot device,
			   but the file and flags will be the defaults, when
			   doing a "warm" bootstrap.  */
			flags |= 0x00030000UL; /* "warm bootstrap" */
		}
	} else {
		flags |= 0x00040000UL; /* "remain halted" */
	}
	*pflags = flags;

#ifdef CONFIG_SMP
	/* Wait for the secondaries to halt. */
	set_cpu_present(boot_cpuid, false);
	set_cpu_possible(boot_cpuid, false);
	while (cpus_weight(cpu_present_map))
		barrier();
#endif

	/* If booted from SRM, reset some of the original environment. */
	if (alpha_using_srm) {
#ifdef CONFIG_DUMMY_CONSOLE
		/* If we've gotten here after SysRq-b, leave interrupt
		   context before taking over the console. */
		if (in_interrupt())
			irq_exit();
		/* This has the effect of resetting the VGA video origin.  */
		take_over_console(&dummy_con, 0, MAX_NR_CONSOLES-1, 1);
#endif
		pci_restore_srm_config();
		set_hae(srm_hae);
	}

	if (alpha_mv.kill_arch)
		alpha_mv.kill_arch(how->mode);

	if (! alpha_using_srm && how->mode != LINUX_REBOOT_CMD_RESTART) {
		/* Unfortunately, since MILO doesn't currently understand
		   the hwrpb bits above, we can't reliably halt the 
		   processor and keep it halted.  So just loop.  */
		return;
	}

	if (alpha_using_srm)
		srm_paging_stop();

	halt();
}
Example #21
int atari_keyb_init(void)
{
	int error;

	if (atari_keyb_done)
		return 0;

	kb_state.state = KEYBOARD;
	kb_state.len = 0;

	error = request_irq(IRQ_MFP_ACIA, atari_keyboard_interrupt,
			    IRQ_TYPE_SLOW, "keyboard,mouse,MIDI",
			    atari_keyboard_interrupt);
	if (error)
		return error;

	atari_turnoff_irq(IRQ_MFP_ACIA);
	do {
		acia.key_ctrl = ACIA_RESET |
				((atari_switches & ATARI_SWITCH_IKBD) ?
				 ACIA_RHTID : 0);
		(void)acia.key_ctrl;
		(void)acia.key_data;

		acia.mid_ctrl = ACIA_RESET |
				((atari_switches & ATARI_SWITCH_MIDI) ?
				 ACIA_RHTID : 0);
		(void)acia.mid_ctrl;
		(void)acia.mid_data;

		acia.key_ctrl = (ACIA_DIV64|ACIA_D8N1S|ACIA_RIE) |
				((atari_switches & ATARI_SWITCH_IKBD) ?
				 ACIA_RHTID : ACIA_RLTID);

		acia.mid_ctrl = ACIA_DIV16 | ACIA_D8N1S |
				((atari_switches & ATARI_SWITCH_MIDI) ?
				 ACIA_RHTID : 0);

	} while ((st_mfp.par_dt_reg & 0x10) == 0);

	st_mfp.active_edge &= ~0x10;
	atari_turnon_irq(IRQ_MFP_ACIA);

	ikbd_self_test = 1;
	ikbd_reset();
	self_test_last_rcv = jiffies;
	while (time_before(jiffies, self_test_last_rcv + HZ/4))
		barrier();

	if (ikbd_self_test == 1)
		printk(KERN_ERR "WARNING: keyboard self test failed!\n");
	ikbd_self_test = 0;

	ikbd_mouse_disable();
	ikbd_joystick_disable();

#ifdef FIXED_ATARI_JOYSTICK
	atari_joystick_init();
#endif

	atari_keyb_done = 1;
	return 0;
}
Example #22
File: bigarray3.c Project: fuzzie/slcore
noinline
int foo(int*a) { barrier(); return a[42]; }
int initParallelEnv(){
    omp_set_num_threads(THREADS);

    /* Setup MPI programming environment */
	MPI_Init_thread(NULL, NULL, MPI_THREAD_MULTIPLE, &threadSupport);

	comm = MPI_COMM_WORLD;
	MPI_Comm_size(comm, &numMPIprocs);
	MPI_Comm_rank(comm, &myMPIRank);

	/*Find the number of bytes for an int */
	sizeInteger = sizeof(int);

	/* Find the processor name of each MPI process */
    MPI_Get_processor_name(myProcName, &procNameLen);

	/* Use processor name to create a communicator
	 * across node boundaries.
	 */
	setupCommunicators();

	/* setup OpenMP programming environment */
	#pragma omp parallel shared(numThreads,globalIDarray,myMPIRank)
	{
		numThreads = omp_get_num_threads();
		myThreadID = omp_get_thread_num();

		/* Allocate space for globalIDarray */
		#pragma omp single
		{
			globalIDarray = (int *)malloc(numThreads * sizeof(int));
		}

		/* calculate the globalID for each thread */
		globalIDarray[myThreadID] = (myMPIRank * numThreads) + myThreadID;
	}
    MPI_Barrier(comm);

    gaspi_config_t config;
    GASPI(config_get(&config));
    config.qp_count = THREADS;
    GASPI(config_set(config));
    /* GASPI setup */
    GASPI(proc_init(GASPI_BLOCK));

    gaspi_rank_t totalRanks;
    GASPI(proc_num(&totalRanks));

    gaspi_rank_t rank;
    GASPI(proc_rank(&rank));

    gaspi_number_t q_num;
    GASPI(queue_num(&q_num));
    assert (q_num == THREADS);

    GASPI(barrier (GASPI_GROUP_ALL, GASPI_BLOCK));
    // ok, we will continue to use the MPI ranks, just make sure GASPI and MPI ranks are identical
    // this is not guaranteed, so depending on the setup this may fail.
    assert (totalRanks == numMPIprocs);
    assert (rank == myMPIRank);

   /* set parallel info in benchmark report type */
   setParallelInfo(numMPIprocs,threadSupport,numThreads);

return 0;
}
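Note the three different barrier flavours in this one test: foo()'s barrier() (presumably a compiler or benchmark barrier supplied by the harness), the MPI_Barrier() collective across MPI ranks, and the GASPI barrier issued through GASPI(barrier(GASPI_GROUP_ALL, GASPI_BLOCK)); only the latter two synchronise processes.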
Example #24
/* Only one of these per ring may run concurrently - enforced by drivers */
static int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
				unsigned char *data, s64 timestamp)
{
	int ret = 0;
	unsigned char *temp_ptr, *change_test_ptr;

	/* initial store */
	if (unlikely(ring->write_p == NULL)) {
		ring->write_p = ring->data;
		/* Doesn't actually matter if this is out of the set
		 * as long as the read pointer is valid before this
		 * passes it - guaranteed as set later in this function.
		 */
		ring->half_p = ring->data - ring->buf.length*ring->buf.bytes_per_datum/2;
	}
	/* Copy data to where ever the current write pointer says */
	memcpy(ring->write_p, data, ring->buf.bytes_per_datum);
	barrier();
	/* Update the pointer used to get most recent value.
	 * Always valid as either points to latest or second latest value.
	 * Before this runs it is null and read attempts fail with -EAGAIN.
	 */
	barrier();
	/* temp_ptr used to ensure we never have an invalid pointer
	 * it may be slightly lagging, but never invalid
	 */
	temp_ptr = ring->write_p + ring->buf.bytes_per_datum;
	/* End of ring, back to the beginning */
	if (temp_ptr == ring->data + ring->buf.length*ring->buf.bytes_per_datum)
		temp_ptr = ring->data;
	/* Update the write pointer
	 * always valid as long as this is the only function able to write.
	 * Care needed with smp systems to ensure more than one ring fill
	 * is never scheduled.
	 */
	ring->write_p = temp_ptr;

	if (ring->read_p == NULL)
		ring->read_p = ring->data;
	/* Buffer full - move the read pointer and create / escalate
	 * ring event */
	/* Tricky case - if the read pointer moves before we adjust it.
	 * Handle by not pushing if it has moved - may result in occasional
	 * unnecessary buffer full events when it wasn't quite true.
	 */
	else if (ring->write_p == ring->read_p) {
		change_test_ptr = ring->read_p;
		temp_ptr = change_test_ptr + ring->buf.bytes_per_datum;
		if (temp_ptr
		    == ring->data + ring->buf.length*ring->buf.bytes_per_datum) {
			temp_ptr = ring->data;
		}
		/* We are moving pointer on one because the ring is full.  Any
		 * change to the read pointer will be this or greater.
		 */
		if (change_test_ptr == ring->read_p)
			ring->read_p = temp_ptr;
	}
	/* investigate if our event barrier has been passed */
	/* There are definite 'issues' with this and chances of
	 * simultaneous read */
	/* Also need to use loop count to ensure this only happens once */
	ring->half_p += ring->buf.bytes_per_datum;
	if (ring->half_p == ring->data + ring->buf.length*ring->buf.bytes_per_datum)
		ring->half_p = ring->data;
	if (ring->half_p == ring->read_p) {
		ring->buf.stufftoread = true;
		wake_up_interruptible(&ring->buf.pollq);
	}
	return ret;
}
Example #25
File: xfs_iops.c Project: Apaisal/linux
/*
 * Initialize the Linux inode, set up the operation vectors and
 * unlock the inode.
 *
 * When reading existing inodes from disk this is called directly
 * from xfs_iget, when creating a new inode it is called from
 * xfs_ialloc after setting up the inode.
 *
 * We are always called with an uninitialised linux inode here.
 * We need to initialise the necessary fields and take a reference
 * on it.
 */
void
xfs_setup_inode(
	struct xfs_inode	*ip)
{
	struct inode		*inode = &ip->i_vnode;

	inode->i_ino = ip->i_ino;
	inode->i_state = I_NEW;

	inode_sb_list_add(inode);
	/* make the inode look hashed for the writeback code */
	hlist_add_fake(&inode->i_hash);

	inode->i_mode	= ip->i_d.di_mode;
	set_nlink(inode, ip->i_d.di_nlink);
	inode->i_uid	= ip->i_d.di_uid;
	inode->i_gid	= ip->i_d.di_gid;

	switch (inode->i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		inode->i_rdev =
			MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
			      sysv_minor(ip->i_df.if_u2.if_rdev));
		break;
	default:
		inode->i_rdev = 0;
		break;
	}

	inode->i_generation = ip->i_d.di_gen;
	i_size_write(inode, ip->i_d.di_size);
	inode->i_atime.tv_sec	= ip->i_d.di_atime.t_sec;
	inode->i_atime.tv_nsec	= ip->i_d.di_atime.t_nsec;
	inode->i_mtime.tv_sec	= ip->i_d.di_mtime.t_sec;
	inode->i_mtime.tv_nsec	= ip->i_d.di_mtime.t_nsec;
	inode->i_ctime.tv_sec	= ip->i_d.di_ctime.t_sec;
	inode->i_ctime.tv_nsec	= ip->i_d.di_ctime.t_nsec;
	xfs_diflags_to_iflags(inode, ip);

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &xfs_inode_operations;
		inode->i_fop = &xfs_file_operations;
		inode->i_mapping->a_ops = &xfs_address_space_operations;
		break;
	case S_IFDIR:
		if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb))
			inode->i_op = &xfs_dir_ci_inode_operations;
		else
			inode->i_op = &xfs_dir_inode_operations;
		inode->i_fop = &xfs_dir_file_operations;
		break;
	case S_IFLNK:
		inode->i_op = &xfs_symlink_inode_operations;
		if (!(ip->i_df.if_flags & XFS_IFINLINE))
			inode->i_mapping->a_ops = &xfs_address_space_operations;
		break;
	default:
		inode->i_op = &xfs_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		break;
	}

	/*
	 * If there is no attribute fork no ACL can exist on this inode,
	 * and it can't have any file capabilities attached to it either.
	 */
	if (!XFS_IFORK_Q(ip)) {
		inode_has_no_xattr(inode);
		cache_no_acl(inode);
	}

	xfs_iflags_clear(ip, XFS_INEW);
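	/* ensure the XFS_INEW clear is not reordered past unlock_new_inode() */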
	barrier();

	unlock_new_inode(inode);
}
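Both switch statements above key off the file-type bits of i_mode. For readers less familiar with the mode layout, here is a standalone userspace sketch of the same classification using the standard S_IFMT macros (the helper name is made up for illustration):

#include <stdio.h>
#include <sys/stat.h>

/* Classify a mode value the same way xfs_setup_inode does:
 * mask off everything but the file-type bits, then switch. */
static const char *file_type_name(mode_t mode)
{
    switch (mode & S_IFMT) {
    case S_IFREG:  return "regular file";
    case S_IFDIR:  return "directory";
    case S_IFLNK:  return "symlink";
    case S_IFBLK:  return "block device";
    case S_IFCHR:  return "character device";
    default:       return "other (fifo/socket/...)";
    }
}

int main(void)
{
    struct stat st;
    if (stat("/etc", &st) == 0)
        printf("/etc is a %s\n", file_type_name(st.st_mode));
    return 0;
}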
Example #26
0
File: barrier.cpp Project: K-ballo/hpx
barrier barrier::create_global_barrier()
{
    runtime& rt = get_runtime();
    util::runtime_configuration const& cfg = rt.get_config();
    return barrier("/0/hpx/global_barrier", cfg.get_num_localities());
}
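The call sizes the barrier to the number of localities, so every locality must arrive at the named barrier before any of them proceeds. The same rendezvous pattern is available in plain C via POSIX barriers; a minimal sketch, with a fixed thread count standing in for the locality count:

#include <pthread.h>
#include <stdio.h>

#define NTHREADS 4
static pthread_barrier_t barrier;

static void *worker(void *arg)
{
    long id = (long)arg;
    printf("thread %ld: before barrier\n", id);
    pthread_barrier_wait(&barrier);   /* blocks until all NTHREADS arrive */
    printf("thread %ld: after barrier\n", id);
    return NULL;
}

int main(void)
{
    pthread_t t[NTHREADS];
    /* size the barrier to the number of participants, much as
     * create_global_barrier() does with get_num_localities() */
    pthread_barrier_init(&barrier, NULL, NTHREADS);
    for (long i = 0; i < NTHREADS; i++)
        pthread_create(&t[i], NULL, worker, (void *)i);
    for (int i = 0; i < NTHREADS; i++)
        pthread_join(t[i], NULL);
    pthread_barrier_destroy(&barrier);
    return 0;
}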
Example #27
0
	/*
	 * Were softirqs turned off above?
	 */
	if (softirq_count() == cnt)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
	add_preempt_count(cnt);
	barrier();
}
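The barrier() at the end of the !CONFIG_TRACE_IRQFLAGS variant is a compiler barrier: it stops the compiler from moving memory accesses of the critical section before the preempt-count increment. For reference, such a barrier is conventionally defined for GCC-compatible compilers roughly as follows (a sketch; the kernel's actual definition lives in its compiler headers):

/* Compiler-only barrier: emits no instructions, but the "memory"
 * clobber tells the compiler it may not cache memory values across
 * this point or reorder memory accesses around it. */
#define barrier() __asm__ __volatile__("" : : : "memory")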
Example #28
0
int main(int argc, char** argv) {
    if (argc != 2)
        return Usage(argc, argv);

    // Windows
    // -------
    // any: 11000 connects/sec, WSAEADDRINUSE, netstat: 16300 client, 700 server
    // client close: 11000 connects/sec, WSAEADDRINUSE, netstat: 16000 client connections
    // server close: 6000 connects/sec, no errors, netstat: 16000 server connections, client connections slowly growing
    // simultaneous close: 6500 connects/sec, WSAEADDRINUSE, netstat: 16000 server, 16000 client
    // graceful close: 6500 connects/sec, occasional WSAECONNRESET on recv(), netstat: 16000 server

    // Linux
    // -----
    // any: wide ranging connects/sec (250-80000), no errors, netstat: 2700 server, 14000 client
    // client close: 21000 connects/sec, no errors, netstat: 16000 client connections
    // server close: 16000 connects/sec, no errors, netstat: 16000 server connections
    // simultaneous close: 48000 connects/sec, no errors, netstat: 8000 server, 8000 client
    // graceful close: 17000 connects/sec, no errors, netstat: 16000 server

    SynchronizationPolicy* client_policy = NULL;
    SynchronizationPolicy* server_policy = NULL;
    Event event_initially_nonsignaled(false, false);
    Event event_initially_signaled(false, true);
    Barrier barrier(2);

    if (!strcmp(argv[1], "any")) {
        client_policy = server_policy = new SynchronizationPolicy();    // policy does nothing
    } else if (!strcmp(argv[1], "client")) {
        // server waits for client to do closesocket(), then client tells server to do closesocket()
        client_policy = new WaitSynchronizationPolicy(&event_initially_signaled, &event_initially_nonsignaled);
        server_policy = new WaitSynchronizationPolicy(&event_initially_nonsignaled, &event_initially_signaled);
    } else if (!strcmp(argv[1], "server")) {
        // client waits for server to do closesocket(), then server tells client to do closesocket()
        client_policy = new WaitSynchronizationPolicy(&event_initially_nonsignaled, &event_initially_signaled);
        server_policy = new WaitSynchronizationPolicy(&event_initially_signaled, &event_initially_nonsignaled);
    } else if (!strcmp(argv[1], "simultaneous")) {
        // client and server try to call closesocket() approximately simultaneously.
        client_policy = new BarrierSynchronizationPolicy(&barrier);
        server_policy = new BarrierSynchronizationPolicy(&barrier);
    } else if (!strcmp(argv[1], "graceful")) {
        client_policy = new GracefulShutdownSynchronizationPolicy();
        server_policy = new SynchronizationPolicy();    // don't do anything special
    } else {
        return Usage(argc, argv);
    }

#ifdef _WIN32
    WSADATA wsaData;
    int result = WSAStartup(MAKEWORD(2, 2), &wsaData);
    if (result != 0) {
        fprintf(stderr, "error: WSAStartup: %d\n", result);
        exit(EXIT_FAILURE);
    }
#endif

    size_t server_connections, client_connections;
    NetStat(&server_connections, &client_connections);
    if (server_connections != 0)
        fprintf(stderr, "There are %zu lingering server connections from 127.0.0.1:%d in netstat.\n", server_connections, PORT);
    if (client_connections != 0)
        fprintf(stderr, "There are %zu lingering client connections to 127.0.0.1:%d in netstat.\n", client_connections, PORT);
    if ((server_connections != 0) || (client_connections != 0)) {
        fprintf(stderr, "Wait a minute and try again.\n");
        exit(EXIT_FAILURE);
    }

    StartThread(TestServerThread, server_policy);
    StartThread(TestClientThread, client_policy);

    while (g_warning_count < MAX_ERROR_COUNT) {
        size_t old_connect_count = g_connect_count;
        Sleep(1000);
        size_t new_connect_count = g_connect_count;
        NetStat(&server_connections, &client_connections);
        printf("connects per second: %zu, server connections: %zu, client connections: %zu\n",
               new_connect_count - old_connect_count, server_connections, client_connections);
    }

    fprintf(stderr, "exiting after %zu errors\n", MAX_ERROR_COUNT);
    NetStat(&server_connections, &client_connections);
    printf("server connections: %zu, client connections: %zu\n",
           server_connections, client_connections);
    exit(EXIT_FAILURE);

    return 0;
}
Example #29
0
static int clip_start_xmit(struct sk_buff *skb, knet_netdev_t *dev)
{
    struct clip_priv *clip_priv = PRIV(dev);
    struct atmarp_entry *entry;
    struct atm_vcc *vcc;
    int old;
    unsigned long flags;

    DPRINTK("clip_start_xmit (skb %p)\n", skb);
    if (!skb->dst)
    {
	printk(KERN_ERR "clip_start_xmit: skb->dst == NULL\n");
	dev_kfree_skb(skb);
	clip_priv->stats.tx_dropped++;
	return 0;
    }
    if (!skb->dst->neighbour)
    {
	printk(KERN_ERR "clip_start_xmit: NO NEIGHBOUR!\n");
	dev_kfree_skb(skb);
	clip_priv->stats.tx_dropped++;
	return 0;
    }
    entry = NEIGH2ENTRY(skb->dst->neighbour);
    if (!entry->vccs)
    {
	if (time_after(jiffies, entry->expires))
	{
	    /* should have been resolved by now; rearm the retry timer */
	    entry->expires = jiffies + ATMARP_RETRY_DELAY * HZ;
	} 
	if (entry->neigh->arp_queue.qlen < ATMARP_MAX_UNRES_PACKETS)
	    skb_queue_tail(&entry->neigh->arp_queue, skb);
	else
	{
	    dev_kfree_skb(skb);
	    clip_priv->stats.tx_dropped++;
	}

	/* If a VCC has not been resolved for a long time, it sends an InARP
	 * packet every 5 minutes. But if the other side has connected by
	 * now, we do not want to wait.
	 */
	all_clip_vccs_start_resolving();
	return 0;
    }
    DPRINTK("neigh %p, vccs %p\n", entry, entry->vccs);
    ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc;
    DPRINTK("using neighbour %p, vcc %p\n", skb->dst->neighbour, vcc);
    if (entry->vccs->encap)
    {
	void *here;

	here = skb_push(skb, RFC1483LLC_LEN);
	memcpy(here, llc_oui, sizeof(llc_oui));
	((u16 *) here)[3] = skb->protocol;
    }
    atomic_add(skb->truesize, &vcc->tx_inuse);
    ATM_SKB(skb)->iovcnt = 0;
    ATM_SKB(skb)->atm_options = vcc->atm_options;
    entry->vccs->last_use = jiffies;
    DPRINTK("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc,vcc->dev);
    old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */
    if (old)
    {
	printk(KERN_WARNING "clip_start_xmit: XOFF->XOFF transition\n");
	return 0;
    }
    clip_priv->stats.tx_packets++;
    clip_priv->stats.tx_bytes += skb->len;
    vcc->dev->ops->send(vcc, skb);
    if (atm_may_send(vcc, 0))
    {
	entry->vccs->xoff = 0;
	return 0;
    }
    spin_lock_irqsave(&clip_priv->xoff_lock, flags);
    knet_netdev_stop_queue(dev); /* XOFF -> throttle immediately */
    barrier();
    if (!entry->vccs->xoff)
	knet_netdev_start_queue(dev);
    /* Oh, we just raced with clip_pop. netif_start_queue should be
       good enough, because nothing should really be asleep because
       of the brief netif_stop_queue. If this isn't true or if it
       changes, use netif_wake_queue instead. */
    spin_unlock_irqrestore(&clip_priv->xoff_lock, flags);
    return 0;
}
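The tail of clip_start_xmit() is a "stop, then re-check" idiom: the queue is throttled pessimistically, then, after a compiler barrier forces a fresh read, the xoff flag is tested once more and the stop undone if the completion path cleared it in between. A condensed userspace sketch of the same idiom (all names hypothetical):

#include <pthread.h>
#include <stdbool.h>

#define barrier() __asm__ __volatile__("" : : : "memory")

struct queue {
    pthread_mutex_t lock;
    bool stopped;   /* stands in for the netif queue state */
    bool xoff;      /* set before send, cleared by the completion path */
};

/* Pessimistically stop the queue, then re-check xoff: if the
 * completion path cleared it between our earlier test and the stop,
 * undo the stop so the queue is not left throttled forever. */
static void throttle_if_still_full(struct queue *q)
{
    pthread_mutex_lock(&q->lock);
    q->stopped = true;
    barrier();                 /* force a fresh read of q->xoff */
    if (!q->xoff)
        q->stopped = false;    /* we raced with completion: restart */
    pthread_mutex_unlock(&q->lock);
}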
Example #30
0
/**
 * cdns_uart_console_wait_tx - Wait for the TX FIFO to be empty
 * @port: Handle to the uart port structure
 */
static void cdns_uart_console_wait_tx(struct uart_port *port)
{
	while (!(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXEMPTY))
		barrier();
}
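The loop polls the status register until the TX-empty bit is set; readl() performs the volatile MMIO access, and barrier() keeps the compiler from caching values across iterations. On a plain variable the barrier alone is what forces the re-read, as in this minimal sketch (tx_done and wait_for_tx are made-up names):

#define barrier() __asm__ __volatile__("" : : : "memory")

static int tx_done;   /* set by another context (thread / signal / IRQ) */

/* Without the barrier (or volatile), the compiler may legally read
 * tx_done once and spin on a cached copy forever. The "memory" clobber
 * forces a fresh load on every iteration. */
static void wait_for_tx(void)
{
    while (!tx_done)
        barrier();
}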