Code example #1
File: command.c Project: AllenWeb/linux
static int nfc_hci_execute_cmd(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
			       const u8 *param, size_t param_len,
			       struct sk_buff **skb)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(ew_wq);
	struct hcp_exec_waiter hcp_ew;
	hcp_ew.wq = &ew_wq;
	hcp_ew.exec_complete = false;
	hcp_ew.result_skb = NULL;

	pr_debug("through pipe=%d, cmd=%d, plen=%zd\n", pipe, cmd, param_len);

	/* TODO: Define hci cmd execution delay. Should it be the same
	 * for all commands?
	 */
	hcp_ew.exec_result = nfc_hci_hcp_message_tx(hdev, pipe,
						    NFC_HCI_HCP_COMMAND, cmd,
						    param, param_len,
						    nfc_hci_execute_cb, &hcp_ew,
						    3000);
	if (hcp_ew.exec_result < 0)
		return hcp_ew.exec_result;

	wait_event(ew_wq, hcp_ew.exec_complete == true);

	if (hcp_ew.exec_result == 0) {
		if (skb)
			*skb = hcp_ew.result_skb;
		else
			kfree_skb(hcp_ew.result_skb);
	}

	return hcp_ew.exec_result;
}
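Example #1 is the canonical shape of this pattern: the wait queue head lives on the caller's stack, an asynchronous callback receives a pointer to it, and the callback sets a flag and wakes the queue before the caller's stack frame can unwind. A minimal self-contained sketch of that handshake (all names here are illustrative, not taken from the NFC code):

#include <linux/types.h>
#include <linux/wait.h>

/* Illustrative only: the generic on-stack wait-queue handshake.
 * The struct and function names are hypothetical.
 */
struct demo_waiter {
	wait_queue_head_t *wq;
	bool done;
	int result;
};

/* Completion callback, run from the asynchronous path (IRQ, work
 * item, ...). Because the caller sleeps in wait_event() below, the
 * on-stack queue is guaranteed to still exist when this runs.
 */
static void demo_complete(void *context, int result)
{
	struct demo_waiter *w = context;

	w->result = result;
	w->done = true;
	wake_up(w->wq);
}

static int demo_execute(void)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	struct demo_waiter w = { .wq = &wq, .done = false };

	/* hand demo_complete/&w to the asynchronous operation here */

	wait_event(wq, w.done);	/* must not return before the wakeup */
	return w.result;
}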
Code example #2
/**
 * 	i2o_msg_post_wait_mem - Post and wait a message with DMA buffers
 *	@c: controller
 *	@msg: message to post
 *	@timeout: time in seconds to wait
 *	@dma: i2o_dma struct of the DMA buffer to free on failure
 *
 * 	This API allows an OSM to post a message and then be told whether or
 *	not the system received a successful reply. If the message times out
 *	then the value '-ETIMEDOUT' is returned. This is a special case. In
 *	this situation the message may (should) complete at an indefinite time
 *	in the future. When it completes it will use the memory buffer
 *	attached to the request. If -ETIMEDOUT is returned then the memory
 *	buffer must not be freed. Instead the event completion will free them
 *	for you. In all other cases the buffer are your problem.
 *
 *	Returns 0 on success, negative error code on timeout or positive error
 *	code from reply.
 */
int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
			  unsigned long timeout, struct i2o_dma *dma)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	struct i2o_exec_wait *wait;
	static u32 tcntxt = 0x80000000;
	unsigned long flags;
	int rc = 0;

	wait = i2o_exec_wait_alloc();
	if (!wait) {
		i2o_msg_nop(c, msg);
		return -ENOMEM;
	}

	if (tcntxt == 0xffffffff)
		tcntxt = 0x80000000;

	if (dma)
		wait->dma = *dma;

	/*
	 * Fill in the message initiator context and transaction context.
	 * We will only use transaction contexts >= 0x80000000 for POST WAIT,
	 * so we could find a POST WAIT reply easier in the reply handler.
	 */
	msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
	wait->tcntxt = tcntxt++;
	msg->u.s.tcntxt = cpu_to_le32(wait->tcntxt);

	wait->wq = &wq;
	/*
	 * we add elements to the head, because if a entry in the list will
	 * never be removed, we have to iterate over it every time
	 */
	list_add(&wait->list, &i2o_exec_wait_list);

	/*
	 * Post the message to the controller. At some point later it will
	 * return. If we time out before it returns then complete will be zero.
	 */
	i2o_msg_post(c, msg);

	wait_event_interruptible_timeout(wq, wait->complete, timeout * HZ);

	spin_lock_irqsave(&wait->lock, flags);

	wait->wq = NULL;

	if (wait->complete)
		rc = le32_to_cpu(wait->msg->body[0]) >> 24;
	else {
		/*
		 * We cannot remove it now. This is important. When it does
		 * terminate (which it must do if the controller has not
		 * died...) then it will otherwise scribble on stuff.
		 *
		 * FIXME: try abort message
		 */
		if (dma)
			dma->virt = NULL;

		rc = -ETIMEDOUT;
	}

	spin_unlock_irqrestore(&wait->lock, flags);

	if (rc != -ETIMEDOUT) {
		i2o_flush_reply(c, wait->m);
		i2o_exec_wait_free(wait);
	}

	return rc;
}
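The kernel-doc above encodes a subtle ownership rule: on -ETIMEDOUT the late reply, not the caller, frees the DMA buffer. A hypothetical caller might look like this (the function and its use of i2o_dma_free() are assumptions for illustration, not code from the i2o tree):

/* Hypothetical caller sketch: honour the -ETIMEDOUT ownership rule
 * documented above. i2o_dma_free() is assumed here for illustration.
 */
static int demo_post(struct i2o_controller *c, struct i2o_message *msg,
		     struct i2o_dma *buf)
{
	int rc = i2o_msg_post_wait_mem(c, msg, 60, buf);

	if (rc == -ETIMEDOUT)
		return rc;	/* late reply will free buf; hands off */

	/* every other outcome: the buffer is the caller's again */
	i2o_dma_free(&c->pdev->dev, buf);
	return rc;
}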
Code example #3
// precondition: never called in_interrupt
static void usbnet_terminate_urbs(struct usbnet *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) +
		unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq)
		&& !skb_queue_empty(&dev->txq)
		&& !skb_queue_empty(&dev->done)) {
			schedule_timeout(UNLINK_TIMEOUT_MS);
			set_current_state(TASK_UNINTERRUPTIBLE);
			netif_dbg(dev, ifdown, dev->net,
				  "waited for %d urb completions\n", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
}
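Two details are worth noting in this version: with && the loop stops as soon as any one of the three queues is empty, and schedule_timeout() takes jiffies, so UNLINK_TIMEOUT_MS is not actually interpreted as milliseconds; later kernels pass msecs_to_jiffies() and rework the condition. The wake-up counterpart lives in the driver's BH; paraphrased as a sketch (not verbatim usbnet code):

/* Paraphrased sketch of the wake-up side (usbnet's BH): once no URB
 * remains on any queue, whoever parked an on-stack wait queue head
 * in dev->wait is woken, and only then may that stack frame unwind.
 */
static void demo_bh_tail(struct usbnet *dev)
{
	if (dev->wait &&
	    dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0)
		wake_up(dev->wait);
}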
Code example #4
static int usbnet_stop (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			temp;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK (unlink_wakeup);
	DECLARE_WAITQUEUE (wait, current);

	netif_stop_queue (net);

	if (netif_msg_ifdown (dev))
		devinfo (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld",
			dev->stats.rx_packets, dev->stats.tx_packets,
			dev->stats.rx_errors, dev->stats.tx_errors
			);

	// ensure there are no more active urbs
	add_wait_queue (&unlink_wakeup, &wait);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs (dev, &dev->txq) + unlink_urbs (dev, &dev->rxq);

	// maybe wait for deletions to finish.
	while (!skb_queue_empty(&dev->rxq) &&
	       !skb_queue_empty(&dev->txq) &&
	       !skb_queue_empty(&dev->done)) {
		msleep(UNLINK_TIMEOUT_MS);
		if (netif_msg_ifdown (dev))
			devdbg (dev, "waited for %d urb completions", temp);
	}
	dev->wait = NULL;
	remove_wait_queue (&unlink_wakeup, &wait);

	usb_kill_urb(dev->interrupt);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	del_timer_sync (&dev->delay);
	tasklet_kill (&dev->bh);

	return 0;
}
Code example #5
File: shdlc.c Project: ARMWorks/FA_2451_Linux_Kernel
/*
 * Called from syscall context to establish shdlc link. Sleeps until
 * link is ready or failure.
 */
static int nfc_shdlc_connect(struct nfc_shdlc *shdlc)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(connect_wq);

	pr_debug("\n");

	mutex_lock(&shdlc->state_mutex);

	shdlc->state = SHDLC_CONNECTING;
	shdlc->connect_wq = &connect_wq;
	shdlc->connect_tries = 0;
	shdlc->connect_result = 1;

	mutex_unlock(&shdlc->state_mutex);

	queue_work(shdlc->sm_wq, &shdlc->sm_work);

	wait_event(connect_wq, shdlc->connect_result != 1);

	return shdlc->connect_result;
}
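Here connect_result == 1 doubles as the "still pending" marker. The completion path in the shdlc state machine stores the real result and wakes the queue; roughly, as a paraphrased sketch:

/* Paraphrased sketch of the completion side: the state machine
 * reports the outcome and releases the sleeper in nfc_shdlc_connect().
 */
static void demo_connect_complete(struct nfc_shdlc *shdlc, int r)
{
	shdlc->connect_result = r;	/* any value != 1 ends the wait */
	wake_up(shdlc->connect_wq);
}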
Code example #6
File: cr_barrier.c Project: AvengerMoJo/apc-8750
// Called to wait on a barrier
int __cr_barrier_wait(cr_barrier_t *barrier)
{
#if CRI_DEBUG
    DECLARE_WAIT_QUEUE_HEAD_ONSTACK(dummy);
    const int interval = 60;
    int sum = 0;
    do {
        wait_event_timeout(barrier->wait, CR_BARRIER_WAIT_COND(barrier), interval * HZ);
	if (CR_BARRIER_WAIT_COND(barrier)) break;
	sum += interval;
	CR_ERR("cr_barrier warning: tgid/pid %d/%d still blocked after %d seconds, with signal_pending=%d.", current->tgid, current->pid, sum, signal_pending(current));
    } while (1);
    smp_rmb();
    if (barrier->interrupted) {
	CR_ERR("cr_barrier error: interrupt on non-interruptible barrier");
    }
#else
    wait_event(barrier->wait, CR_BARRIER_WAIT_COND(barrier));
#endif
    return do_barrier_once(barrier);
}
Code example #7
/**
 * Main entrypoint for syncpoint value waits.
 */
int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
			u32 thresh, u32 timeout, u32 *value)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	void *ref;
	int err = 0;

	if (value)
		*value = 0;

	BUG_ON(!check_max(sp, id, thresh));

	/* first check cache */
	if (nvhost_syncpt_min_cmp(sp, id, thresh)) {
		if (value)
			*value = nvhost_syncpt_read_min(sp, id);
		return 0;
	}

	/* keep host alive */
	nvhost_module_busy(&syncpt_to_dev(sp)->mod);

	if (client_managed(id) || !nvhost_syncpt_min_eq_max(sp, id)) {
		/* try to read from register */
		u32 val = nvhost_syncpt_update_min(sp, id);
		if ((s32)(val - thresh) >= 0) {
			if (value)
				*value = val;
			goto done;
		}
	}

	if (!timeout) {
		err = -EAGAIN;
		goto done;
	}

	/* schedule a wakeup when the syncpoint value is reached */
	err = nvhost_intr_add_action(&(syncpt_to_dev(sp)->intr), id, thresh,
				NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE, &wq, &ref);
	if (err)
		goto done;

	err = -EAGAIN;
	/* wait for the syncpoint, or timeout, or signal */
	while (timeout) {
		u32 check = min_t(u32, SYNCPT_CHECK_PERIOD, timeout);
		int remain = wait_event_interruptible_timeout(wq,
						nvhost_syncpt_min_cmp(sp, id, thresh),
						check);
		if (remain > 0 || nvhost_syncpt_min_cmp(sp, id, thresh)) {
			if (value)
				*value = nvhost_syncpt_read_min(sp, id);
			err = 0;
			break;
		}
		if (remain < 0) {
			err = remain;
			break;
		}
		if (timeout != NVHOST_NO_TIMEOUT)
			timeout -= check;
		if (timeout) {
			dev_warn(&syncpt_to_dev(sp)->pdev->dev,
				"syncpoint id %d (%s) stuck waiting %d\n",
				id, nvhost_syncpt_name(id), thresh);
			nvhost_syncpt_debug(sp);
		}
	};
	nvhost_intr_put_ref(&(syncpt_to_dev(sp)->intr), ref);

done:
	nvhost_module_idle(&syncpt_to_dev(sp)->mod);
	return err;
}
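All the nvhost variants in this listing share one shape: instead of a single unbounded sleep, they wait in SYNCPT_CHECK_PERIOD-sized chunks so a wedged syncpoint produces periodic diagnostics rather than a silent hang. Distilled into a standalone sketch (the helper and its callback-style condition are illustrative):

#include <linux/kernel.h>
#include <linux/wait.h>

/* Illustrative distillation of the loop above: wait in bounded
 * chunks so a stuck condition yields periodic warnings rather than
 * one silent, unbounded sleep.
 */
static int wait_in_chunks(wait_queue_head_t *wq, bool (*cond)(void *),
			  void *arg, u32 timeout, u32 chunk_max)
{
	while (timeout) {
		u32 chunk = min_t(u32, chunk_max, timeout);
		long remain = wait_event_interruptible_timeout(wq, cond(arg),
							       chunk);

		if (remain > 0)
			return 0;	/* condition became true */
		if (remain < 0)
			return remain;	/* -ERESTARTSYS: signal pending */
		timeout -= chunk;	/* timed out: warn here, then retry */
	}
	return -EAGAIN;
}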
Code example #8
static int igt_wakeup(void *arg)
{
	I915_RND_STATE(prng);
	struct intel_engine_cs *engine = arg;
	struct igt_wakeup *waiters;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	const int count = 4096;
	const u32 max_seqno = count / 4;
	atomic_t ready, set, done;
	int err = -ENOMEM;
	int n, step;

	mock_engine_reset(engine);

	waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL);
	if (!waiters)
		goto out_engines;

	/* Create a large number of threads, each waiting on a random seqno.
	 * Multiple waiters will be waiting for the same seqno.
	 */
	atomic_set(&ready, count);
	for (n = 0; n < count; n++) {
		waiters[n].wq = &wq;
		waiters[n].ready = &ready;
		waiters[n].set = &set;
		waiters[n].done = &done;
		waiters[n].engine = engine;
		waiters[n].flags = BIT(IDLE);

		waiters[n].tsk = kthread_run(igt_wakeup_thread, &waiters[n],
					     "i915/igt:%d", n);
		if (IS_ERR(waiters[n].tsk))
			goto out_waiters;

		get_task_struct(waiters[n].tsk);
	}

	for (step = 1; step <= max_seqno; step <<= 1) {
		u32 seqno;

		/* The waiter threads start paused as we assign them a random
		 * seqno and reset the engine. Once the engine is reset,
		 * we signal that the threads may begin their wait upon their
		 * seqno.
		 */
		for (n = 0; n < count; n++) {
			GEM_BUG_ON(!test_bit(IDLE, &waiters[n].flags));
			waiters[n].seqno =
				1 + prandom_u32_state(&prng) % max_seqno;
		}
		mock_seqno_advance(engine, 0);
		igt_wake_all_sync(&ready, &set, &done, &wq, count);

		/* Simulate the GPU doing chunks of work, with one or more
		 * seqno appearing to finish at the same time. A random number
		 * of threads will be waiting upon the update and hopefully be
		 * woken.
		 */
		for (seqno = 1; seqno <= max_seqno + step; seqno += step) {
			usleep_range(50, 500);
			mock_seqno_advance(engine, seqno);
		}
		GEM_BUG_ON(intel_engine_get_seqno(engine) < 1 + max_seqno);

		/* With the seqno now beyond any of the waiting threads, they
		 * should all be woken, see that they are complete and signal
		 * that they are ready for the next test. We wait until all
		 * threads are complete and waiting for us (i.e. not a seqno).
		 */
		if (!wait_var_event_timeout(&done,
					    !atomic_read(&done), 10 * HZ)) {
			pr_err("Timed out waiting for %d remaining waiters\n",
			       atomic_read(&done));
			err = -ETIMEDOUT;
			break;
		}

		err = check_rbtree_empty(engine);
		if (err)
			break;
	}

out_waiters:
	for (n = 0; n < count; n++) {
		if (IS_ERR(waiters[n].tsk))
			break;

		set_bit(STOP, &waiters[n].flags);
	}
	mock_seqno_advance(engine, INT_MAX); /* wakeup any broken waiters */
	igt_wake_all_sync(&ready, &set, &done, &wq, n);

	for (n = 0; n < count; n++) {
		if (IS_ERR(waiters[n].tsk))
			break;

		kthread_stop(waiters[n].tsk);
		put_task_struct(waiters[n].tsk);
	}

	kvfree(waiters);
out_engines:
	mock_engine_flush(engine);
	return err;
}
Code example #9
File: dir.c Project: mkrufky/linux
static int fuse_direntplus_link(struct file *file,
				struct fuse_direntplus *direntplus,
				u64 attr_version)
{
	struct fuse_entry_out *o = &direntplus->entry_out;
	struct fuse_dirent *dirent = &direntplus->dirent;
	struct dentry *parent = file->f_path.dentry;
	struct qstr name = QSTR_INIT(dirent->name, dirent->namelen);
	struct dentry *dentry;
	struct dentry *alias;
	struct inode *dir = d_inode(parent);
	struct fuse_conn *fc;
	struct inode *inode;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);

	if (!o->nodeid) {
		/*
		 * Unlike in the case of fuse_lookup, zero nodeid does not mean
		 * ENOENT. Instead, it only means the userspace filesystem did
		 * not want to return attributes/handle for this entry.
		 *
		 * So do nothing.
		 */
		return 0;
	}

	if (name.name[0] == '.') {
		/*
		 * We could potentially refresh the attributes of the directory
		 * and its parent?
		 */
		if (name.len == 1)
			return 0;
		if (name.name[1] == '.' && name.len == 2)
			return 0;
	}

	if (invalid_nodeid(o->nodeid))
		return -EIO;
	if (!fuse_valid_type(o->attr.mode))
		return -EIO;

	fc = get_fuse_conn(dir);

	name.hash = full_name_hash(parent, name.name, name.len);
	dentry = d_lookup(parent, &name);
	if (!dentry) {
retry:
		dentry = d_alloc_parallel(parent, &name, &wq);
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
	}
	if (!d_in_lookup(dentry)) {
		struct fuse_inode *fi;
		inode = d_inode(dentry);
		if (!inode ||
		    get_node_id(inode) != o->nodeid ||
		    ((o->attr.mode ^ inode->i_mode) & S_IFMT)) {
			d_invalidate(dentry);
			dput(dentry);
			goto retry;
		}
		if (is_bad_inode(inode)) {
			dput(dentry);
			return -EIO;
		}

		fi = get_fuse_inode(inode);
		spin_lock(&fc->lock);
		fi->nlookup++;
		spin_unlock(&fc->lock);

		forget_all_cached_acls(inode);
		fuse_change_attributes(inode, &o->attr,
				       entry_attr_timeout(o),
				       attr_version);
		/*
		 * The other branch comes via fuse_iget()
		 * which bumps nlookup inside
		 */
	} else {
		inode = fuse_iget(dir->i_sb, o->nodeid, o->generation,
				  &o->attr, entry_attr_timeout(o),
				  attr_version);
		if (!inode)
			inode = ERR_PTR(-ENOMEM);

		alias = d_splice_alias(inode, dentry);
		d_lookup_done(dentry);
		if (alias) {
			dput(dentry);
			dentry = alias;
		}
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
	}
	if (fc->readdirplus_auto)
		set_bit(FUSE_I_INIT_RDPLUS, &get_fuse_inode(inode)->state);
	fuse_change_entry_timeout(dentry, o);

	dput(dentry);
	return 0;
}
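This one inverts the usual roles: the thread that declares the on-stack queue never sleeps on it. d_alloc_parallel() stores the pointer in the new in-lookup dentry so that concurrent lookups of the same name sleep there until d_lookup_done(). The generic shape, as a sketch (the function name and the elided instantiation step are illustrative):

#include <linux/dcache.h>

/* Sketch of the d_alloc_parallel() pattern: other threads looking up
 * the same name block on wq until d_lookup_done() is called by the
 * thread that won the lookup race.
 */
static struct dentry *lookup_or_instantiate(struct dentry *parent,
					    const struct qstr *name)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	struct dentry *dentry = d_alloc_parallel(parent, name, &wq);

	if (IS_ERR(dentry))
		return dentry;
	if (d_in_lookup(dentry)) {
		/* we own the in-lookup dentry: attach the inode here... */
		d_lookup_done(dentry);	/* ...then release any waiters */
	}
	return dentry;
}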
Code example #10
File: bluecard_cs.c Project: 0x000000FF/edison-linux
static void bluecard_write_wakeup(struct bluecard_info *info)
{
	if (!info) {
		BT_ERR("Unknown device");
		return;
	}

	if (!test_bit(XMIT_SENDING_READY, &(info->tx_state)))
		return;

	if (test_and_set_bit(XMIT_SENDING, &(info->tx_state))) {
		set_bit(XMIT_WAKEUP, &(info->tx_state));
		return;
	}

	do {
		unsigned int iobase = info->p_dev->resource[0]->start;
		unsigned int offset;
		unsigned char command;
		unsigned long ready_bit;
		register struct sk_buff *skb;
		int len;

		clear_bit(XMIT_WAKEUP, &(info->tx_state));

		if (!pcmcia_dev_present(info->p_dev))
			return;

		if (test_bit(XMIT_BUFFER_NUMBER, &(info->tx_state))) {
			if (!test_bit(XMIT_BUF_TWO_READY, &(info->tx_state)))
				break;
			offset = 0x10;
			command = REG_COMMAND_TX_BUF_TWO;
			ready_bit = XMIT_BUF_TWO_READY;
		} else {
			if (!test_bit(XMIT_BUF_ONE_READY, &(info->tx_state)))
				break;
			offset = 0x00;
			command = REG_COMMAND_TX_BUF_ONE;
			ready_bit = XMIT_BUF_ONE_READY;
		}

		skb = skb_dequeue(&(info->txq));
		if (!skb)
			break;

		if (bt_cb(skb)->pkt_type & 0x80) {
			/* Disable RTS */
			info->ctrl_reg |= REG_CONTROL_RTS;
			outb(info->ctrl_reg, iobase + REG_CONTROL);
		}

		/* Activate LED */
		bluecard_enable_activity_led(info);

		/* Send frame */
		len = bluecard_write(iobase, offset, skb->data, skb->len);

		/* Tell the FPGA to send the data */
		outb_p(command, iobase + REG_COMMAND);

		/* Mark the buffer as dirty */
		clear_bit(ready_bit, &(info->tx_state));

		if (bt_cb(skb)->pkt_type & 0x80) {
			DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
			DEFINE_WAIT(wait);

			unsigned char baud_reg;

			switch (bt_cb(skb)->pkt_type) {
			case PKT_BAUD_RATE_460800:
				baud_reg = REG_CONTROL_BAUD_RATE_460800;
				break;
			case PKT_BAUD_RATE_230400:
				baud_reg = REG_CONTROL_BAUD_RATE_230400;
				break;
			case PKT_BAUD_RATE_115200:
				baud_reg = REG_CONTROL_BAUD_RATE_115200;
				break;
			case PKT_BAUD_RATE_57600:
				/* Fall through... */
			default:
				baud_reg = REG_CONTROL_BAUD_RATE_57600;
				break;
			}

			/* Wait until the command reaches the baseband */
			prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
			schedule_timeout(HZ/10);
			finish_wait(&wq, &wait);

			/* Set baud on baseband */
			info->ctrl_reg &= ~0x03;
			info->ctrl_reg |= baud_reg;
			outb(info->ctrl_reg, iobase + REG_CONTROL);

			/* Enable RTS */
			info->ctrl_reg &= ~REG_CONTROL_RTS;
			outb(info->ctrl_reg, iobase + REG_CONTROL);

			/* Wait before the next HCI packet can be sent */
			prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
			schedule_timeout(HZ);
			finish_wait(&wq, &wait);
		}

		if (len == skb->len) {
			kfree_skb(skb);
		} else {
			skb_pull(skb, len);
			skb_queue_head(&(info->txq), skb);
		}

		info->hdev->stat.byte_tx += len;

		/* Change buffer */
		change_bit(XMIT_BUFFER_NUMBER, &(info->tx_state));

	} while (test_bit(XMIT_WAKEUP, &(info->tx_state)));

	clear_bit(XMIT_SENDING, &(info->tx_state));
}
Code example #11
File: gr3d_t20.c Project: Ntemis/LG_X3_P880_v20a
int nvhost_gr3d_t20_read_reg(
	struct nvhost_device *dev,
	struct nvhost_channel *channel,
	struct nvhost_hwctx *hwctx,
	u32 offset,
	u32 *value)
{
	struct host1x_hwctx *hwctx_to_save = NULL;
	struct nvhost_hwctx_handler *h = hwctx->h;
	struct host1x_hwctx_handler *p = to_host1x_hwctx_handler(h);
	bool need_restore = false;
	u32 syncpt_incrs = 4;
	unsigned int pending = 0;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	void *ref;
	void *ctx_waiter, *read_waiter, *completed_waiter;
	struct nvhost_job *job;
	u32 syncval;
	int err;

	if (hwctx->has_timedout)
		return -ETIMEDOUT;

	ctx_waiter = nvhost_intr_alloc_waiter();
	read_waiter = nvhost_intr_alloc_waiter();
	completed_waiter = nvhost_intr_alloc_waiter();
	if (!ctx_waiter || !read_waiter || !completed_waiter) {
		err = -ENOMEM;
		goto done;
	}

	job = nvhost_job_alloc(channel, hwctx,
			NULL,
			nvhost_get_host(dev)->memmgr, 0, 0);
	if (!job) {
		err = -ENOMEM;
		goto done;
	}

	/* keep module powered */
	nvhost_module_busy(dev);

	/* get submit lock */
	err = mutex_lock_interruptible(&channel->submitlock);
	if (err) {
		nvhost_module_idle(dev);
		return err;
	}

	/* context switch */
	if (channel->cur_ctx != hwctx) {
		hwctx_to_save = channel->cur_ctx ?
			to_host1x_hwctx(channel->cur_ctx) : NULL;
		if (hwctx_to_save) {
			syncpt_incrs += hwctx_to_save->save_incrs;
			hwctx_to_save->hwctx.valid = true;
			nvhost_job_get_hwctx(job, &hwctx_to_save->hwctx);
		}
		channel->cur_ctx = hwctx;
		if (channel->cur_ctx && channel->cur_ctx->valid) {
			need_restore = true;
			syncpt_incrs += to_host1x_hwctx(channel->cur_ctx)
				->restore_incrs;
		}
	}

	syncval = nvhost_syncpt_incr_max(&nvhost_get_host(dev)->syncpt,
		p->syncpt, syncpt_incrs);

	job->syncpt_id = p->syncpt;
	job->syncpt_incrs = syncpt_incrs;
	job->syncpt_end = syncval;

	/* begin a CDMA submit */
	nvhost_cdma_begin(&channel->cdma, job);

	/* push save buffer (pre-gather setup depends on unit) */
	if (hwctx_to_save)
		h->save_push(&hwctx_to_save->hwctx, &channel->cdma);

	/* gather restore buffer */
	if (need_restore)
		nvhost_cdma_push(&channel->cdma,
			nvhost_opcode_gather(to_host1x_hwctx(channel->cur_ctx)
				->restore_size),
			to_host1x_hwctx(channel->cur_ctx)->restore_phys);

	/* Switch to 3D - wait for it to complete what it was doing */
	nvhost_cdma_push(&channel->cdma,
		nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
		nvhost_opcode_imm_incr_syncpt(
			host1x_uclass_incr_syncpt_cond_op_done_v(),
			p->syncpt));
	nvhost_cdma_push(&channel->cdma,
		nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
			host1x_uclass_wait_syncpt_base_r(), 1),
		nvhost_class_host_wait_syncpt_base(p->syncpt,
			p->waitbase, 1));
	/*  Tell 3D to send register value to FIFO */
	nvhost_cdma_push(&channel->cdma,
		nvhost_opcode_nonincr(host1x_uclass_indoff_r(), 1),
		nvhost_class_host_indoff_reg_read(
			host1x_uclass_indoff_indmodid_gr3d_v(),
			offset, false));
	nvhost_cdma_push(&channel->cdma,
		nvhost_opcode_imm(host1x_uclass_inddata_r(), 0),
		NVHOST_OPCODE_NOOP);
	/*  Increment syncpt to indicate that FIFO can be read */
	nvhost_cdma_push(&channel->cdma,
		nvhost_opcode_imm_incr_syncpt(
			host1x_uclass_incr_syncpt_cond_immediate_v(),
			p->syncpt),
		NVHOST_OPCODE_NOOP);
	/*  Wait for value to be read from FIFO */
	nvhost_cdma_push(&channel->cdma,
		nvhost_opcode_nonincr(host1x_uclass_wait_syncpt_base_r(), 1),
		nvhost_class_host_wait_syncpt_base(p->syncpt,
			p->waitbase, 3));
	/*  Indicate submit complete */
	nvhost_cdma_push(&channel->cdma,
		nvhost_opcode_nonincr(host1x_uclass_incr_syncpt_base_r(), 1),
		nvhost_class_host_incr_syncpt_base(p->waitbase, 4));
	nvhost_cdma_push(&channel->cdma,
		NVHOST_OPCODE_NOOP,
		nvhost_opcode_imm_incr_syncpt(
			host1x_uclass_incr_syncpt_cond_immediate_v(),
			p->syncpt));

	/* end CDMA submit  */
	nvhost_cdma_end(&channel->cdma, job);
	nvhost_job_put(job);
	job = NULL;

	/*
	 * schedule a context save interrupt (to drain the host FIFO
	 * if necessary, and to release the restore buffer)
	 */
	if (hwctx_to_save) {
		err = nvhost_intr_add_action(
			&nvhost_get_host(dev)->intr,
			p->syncpt,
			syncval - syncpt_incrs
				+ hwctx_to_save->save_incrs
				- 1,
			NVHOST_INTR_ACTION_CTXSAVE, hwctx_to_save,
			ctx_waiter,
			NULL);
		ctx_waiter = NULL;
		WARN(err, "Failed to set context save interrupt");
	}

	/* Wait for FIFO to be ready */
	err = nvhost_intr_add_action(&nvhost_get_host(dev)->intr,
			p->syncpt, syncval - 2,
			NVHOST_INTR_ACTION_WAKEUP, &wq,
			read_waiter,
			&ref);
	read_waiter = NULL;
	WARN(err, "Failed to set wakeup interrupt");
	wait_event(wq,
		nvhost_syncpt_is_expired(&nvhost_get_host(dev)->syncpt,
				p->syncpt, syncval - 2));
	nvhost_intr_put_ref(&nvhost_get_host(dev)->intr, p->syncpt,
			ref);

	/* Read the register value from FIFO */
	err = nvhost_channel_drain_read_fifo(channel, value, 1, &pending);

	/* Indicate we've read the value */
	nvhost_syncpt_cpu_incr(&nvhost_get_host(dev)->syncpt,
			p->syncpt);

	/* Schedule a submit complete interrupt */
	err = nvhost_intr_add_action(&nvhost_get_host(dev)->intr,
			p->syncpt, syncval,
			NVHOST_INTR_ACTION_SUBMIT_COMPLETE, channel,
			completed_waiter, NULL);
	completed_waiter = NULL;
	WARN(err, "Failed to set submit complete interrupt");

	mutex_unlock(&channel->submitlock);

done:
	kfree(ctx_waiter);
	kfree(read_waiter);
	kfree(completed_waiter);
	return err;
}
Code example #12
/**
 * Main entrypoint for syncpoint value waits.
 */
int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
			u32 thresh, u32 timeout, u32 *value)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	void *ref;
	void *waiter;
	int err = 0, check_count = 0, low_timeout = 0;
	u32 val;

	if (value)
		*value = 0;

	/* first check cache */
	if (nvhost_syncpt_is_expired(sp, id, thresh)) {
		if (value)
			*value = nvhost_syncpt_read_min(sp, id);
		return 0;
	}

	/* keep host alive */
	nvhost_module_busy(syncpt_to_dev(sp)->dev);

	/* try to read from register */
	val = syncpt_op().update_min(sp, id);
	if (nvhost_syncpt_is_expired(sp, id, thresh)) {
		if (value)
			*value = val;
		goto done;
	}

	if (!timeout) {
		err = -EAGAIN;
		goto done;
	}

	/* schedule a wakeup when the syncpoint value is reached */
	waiter = nvhost_intr_alloc_waiter();
	if (!waiter) {
		err = -ENOMEM;
		goto done;
	}

	err = nvhost_intr_add_action(&(syncpt_to_dev(sp)->intr), id, thresh,
				NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE, &wq,
				waiter,
				&ref);
	if (err)
		goto done;

	err = -EAGAIN;
	/* Caller-specified timeout may be impractically low */
	if (timeout < SYNCPT_CHECK_PERIOD)
		low_timeout = timeout;

	/* wait for the syncpoint, or timeout, or signal */
	while (timeout) {
		u32 check = min_t(u32, SYNCPT_CHECK_PERIOD, timeout);
		int remain = wait_event_interruptible_timeout(wq,
				syncpt_update_min_is_expired(sp, id, thresh),
				check);
		if (remain > 0 || nvhost_syncpt_is_expired(sp, id, thresh)) {
			if (value)
				*value = nvhost_syncpt_read_min(sp, id);
			err = 0;
			break;
		}
		if (remain < 0) {
			err = remain;
			break;
		}
		if (timeout != NVHOST_NO_TIMEOUT)
			timeout -= check;
		if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
			dev_warn(&syncpt_to_dev(sp)->dev->dev,
				"%s: syncpoint id %d (%s) stuck waiting %d, timeout=%d\n",
				 current->comm, id, syncpt_op().name(sp, id),
				 thresh, timeout);
			syncpt_op().debug(sp);
			if (check_count == MAX_STUCK_CHECK_COUNT) {
				if (low_timeout) {
					dev_warn(&syncpt_to_dev(sp)->dev->dev,
						"is timeout %d too low?\n",
						low_timeout);
				}
				nvhost_debug_dump(syncpt_to_dev(sp));
			}
			check_count++;
		}
	}
	nvhost_intr_put_ref(&(syncpt_to_dev(sp)->intr), id, ref);

done:
	nvhost_module_idle(syncpt_to_dev(sp)->dev);
	return err;
}
Code example #13
int nvhost_3dctx_prepare_power_off(struct nvhost_module *mod)
{
	struct nvhost_channel *ch =
			container_of(mod, struct nvhost_channel, mod);
	struct nvhost_hwctx *hwctx_to_save;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	u32 syncpt_incrs, syncpt_val;
	int err = 0;
	void *ref;
	void *ctx_waiter = NULL, *wakeup_waiter = NULL;

	ctx_waiter = nvhost_intr_alloc_waiter();
	wakeup_waiter = nvhost_intr_alloc_waiter();
	if (!ctx_waiter || !wakeup_waiter) {
		err = -ENOMEM;
		goto done;
	}

	if (mod->desc->busy)
		mod->desc->busy(mod);

	mutex_lock(&ch->submitlock);
	hwctx_to_save = ch->cur_ctx;
	if (!hwctx_to_save) {
		mutex_unlock(&ch->submitlock);
		goto done;
	}

	err = nvhost_cdma_begin(&ch->cdma, hwctx_to_save->timeout);
	if (err) {
		mutex_unlock(&ch->submitlock);
		goto done;
	}

	hwctx_to_save->valid = true;
	ch->ctxhandler.get(hwctx_to_save);
	ch->cur_ctx = NULL;

	syncpt_incrs = hwctx_to_save->save_incrs;
	syncpt_val = nvhost_syncpt_incr_max(&ch->dev->syncpt,
					NVSYNCPT_3D, syncpt_incrs);

	ch->ctxhandler.save_push(&ch->cdma, hwctx_to_save);
	nvhost_cdma_end(&ch->cdma, ch->dev->nvmap, NVSYNCPT_3D, syncpt_val,
			NULL, 0, hwctx_to_save->timeout);

	err = nvhost_intr_add_action(&ch->dev->intr, NVSYNCPT_3D,
			syncpt_val - syncpt_incrs + hwctx_to_save->save_thresh,
			NVHOST_INTR_ACTION_CTXSAVE, hwctx_to_save,
			ctx_waiter,
			NULL);
	ctx_waiter = NULL;
	WARN(err, "Failed to set context save interrupt");

	err = nvhost_intr_add_action(&ch->dev->intr, NVSYNCPT_3D, syncpt_val,
			NVHOST_INTR_ACTION_WAKEUP, &wq,
			wakeup_waiter,
			&ref);
	wakeup_waiter = NULL;
	WARN(err, "Failed to set wakeup interrupt");
	wait_event(wq,
		nvhost_syncpt_min_cmp(&ch->dev->syncpt,
				NVSYNCPT_3D, syncpt_val));

	nvhost_intr_put_ref(&ch->dev->intr, ref);

	nvhost_cdma_update(&ch->cdma);

	mutex_unlock(&ch->submitlock);

done:
	kfree(ctx_waiter);
	kfree(wakeup_waiter);
	return err;
}
Code example #14
static
int axusbnet_stop(struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	struct driver_info	*info = dev->driver_info;
	int			temp;
	int			retval;
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
#else
	DECLARE_WAIT_QUEUE_HEAD(unlink_wakeup);
#endif
	DECLARE_WAITQUEUE(wait, current);

	netif_stop_queue(net);

	if (netif_msg_ifdown(dev))
		devinfo(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld",
			dev->stats.rx_packets, dev->stats.tx_packets,
			dev->stats.rx_errors, dev->stats.tx_errors);

	/* allow minidriver to stop correctly (wireless devices to turn off
	 * radio etc) */
	if (info->stop) {
		retval = info->stop(dev);
		if (retval < 0 && netif_msg_ifdown(dev))
			devinfo(dev,
				"stop fail (%d) usbnet usb-%s-%s, %s",
				retval,
				dev->udev->bus->bus_name, dev->udev->devpath,
				info->description);
	}

	if (!(info->flags & FLAG_AVOID_UNLINK_URBS)) {
		/* ensure there are no more active urbs */
		add_wait_queue(&unlink_wakeup, &wait);
		dev->wait = &unlink_wakeup;
		temp = unlink_urbs(dev, &dev->txq) +
			unlink_urbs(dev, &dev->rxq);

		/* maybe wait for deletions to finish. */
		while (!skb_queue_empty(&dev->rxq)
				&& !skb_queue_empty(&dev->txq)
				&& !skb_queue_empty(&dev->done)) {
			msleep(UNLINK_TIMEOUT_MS);
			if (netif_msg_ifdown(dev))
				devdbg(dev, "waited for %d urb completions",
				       temp);
		}
		dev->wait = NULL;
		remove_wait_queue(&unlink_wakeup, &wait);
	}

	usb_kill_urb(dev->interrupt);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	del_timer_sync(&dev->delay);
	tasklet_kill(&dev->bh);

	return 0;
}
Code example #15
File: rt_main_dev.c Project: 3sOx/asuswrt-merlin
/*
========================================================================
Routine Description:
    Close raxx interface.

Arguments:
	*net_dev			the raxx interface pointer

Return Value:
    0					Open OK
	otherwise			Open Fail

Note:
	1. if open fail, kernel will not call the close function.
	2. Free memory for
		(1) Mlme Memory Handler:		MlmeHalt()
		(2) TX & RX:					RTMPFreeTxRxRingMemory()
		(3) BA Reordering: 				ba_reordering_resource_release()
========================================================================
*/
int rt28xx_close(struct net_device *dev)
{
	struct net_device *net_dev = (struct net_device *)dev;
	struct rt_rtmp_adapter *pAd = NULL;
	BOOLEAN Cancelled;
	u32 i = 0;

#ifdef RTMP_MAC_USB
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
#endif /* RTMP_MAC_USB // */

	GET_PAD_FROM_NET_DEV(pAd, net_dev);

	DBGPRINT(RT_DEBUG_TRACE, ("===> rt28xx_close\n"));

	Cancelled = FALSE;
	/* Sanity check for pAd */
	if (pAd == NULL)
		return 0;	/* close ok */

	{
#ifdef RTMP_MAC_PCI
		RTMPPCIeLinkCtrlValueRestore(pAd, RESTORE_CLOSE);
#endif /* RTMP_MAC_PCI // */

		/* If driver doesn't wake up firmware here, */
		/* NICLoadFirmware will hang forever when interface is up again. */
		if (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_DOZE)) {
			AsicForceWakeup(pAd, TRUE);
		}
#ifdef RTMP_MAC_USB
		RTMP_SET_FLAG(pAd, fRTMP_ADAPTER_REMOVE_IN_PROGRESS);
#endif /* RTMP_MAC_USB // */

		MlmeRadioOff(pAd);
#ifdef RTMP_MAC_PCI
		pAd->bPCIclkOff = FALSE;
#endif /* RTMP_MAC_PCI // */
	}

	RTMP_SET_FLAG(pAd, fRTMP_ADAPTER_HALT_IN_PROGRESS);

	for (i = 0; i < NUM_OF_TX_RING; i++) {
		while (pAd->DeQueueRunning[i] == TRUE) {
			DBGPRINT(RT_DEBUG_TRACE,
				 ("Waiting for TxQueue[%d] done..........\n",
				  i));
			RTMPusecDelay(1000);
		}
	}

#ifdef RTMP_MAC_USB
	/* ensure there are no more active urbs. */
	add_wait_queue(&unlink_wakeup, &wait);
	pAd->wait = &unlink_wakeup;

	/* maybe wait for deletions to finish. */
	i = 0;
	/*while((i < 25) && atomic_read(&pAd->PendingRx) > 0) */
	while (i < 25) {
		unsigned long IrqFlags;

		RTMP_IRQ_LOCK(&pAd->BulkInLock, IrqFlags);
		if (pAd->PendingRx == 0) {
			RTMP_IRQ_UNLOCK(&pAd->BulkInLock, IrqFlags);
			break;
		}
		RTMP_IRQ_UNLOCK(&pAd->BulkInLock, IrqFlags);

		msleep(UNLINK_TIMEOUT_MS);	/*Time in millisecond */
		i++;
	}
	pAd->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
#endif /* RTMP_MAC_USB // */

	/* Stop Mlme state machine */
	MlmeHalt(pAd);

	/* Close net tasklets */
	RtmpNetTaskExit(pAd);

	{
		MacTableReset(pAd);
	}

	MeasureReqTabExit(pAd);
	TpcReqTabExit(pAd);

	/* Close kernel threads */
	RtmpMgmtTaskExit(pAd);

#ifdef RTMP_MAC_PCI
	{
		BOOLEAN brc;
		/*      unsigned long                   Value; */

		if (RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_INTERRUPT_ACTIVE)) {
			RTMP_ASIC_INTERRUPT_DISABLE(pAd);
		}
		/* Receive packets to clear DMA index after disable interrupt. */
		/*RTMPHandleRxDoneInterrupt(pAd); */
		/* Put the radio off to save power when the driver unloads.
		 * After radio off, registers can't be read or written, so
		 * finish all register access before the radio goes off. */

		brc = RT28xxPciAsicRadioOff(pAd, RTMP_HALT, 0);

/* In solution 3 of 3090F, bPCIclkOff will be set to TRUE after calling RT28xxPciAsicRadioOff */
		pAd->bPCIclkOff = FALSE;

		if (brc == FALSE) {
			DBGPRINT(RT_DEBUG_ERROR,
				 ("%s call RT28xxPciAsicRadioOff fail!\n",
				  __func__));
		}
	}

/*
	if (RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_INTERRUPT_ACTIVE))
	{
		RTMP_ASIC_INTERRUPT_DISABLE(pAd);
	}

	// Disable Rx, register value supposed will remain after reset
	NICIssueReset(pAd);
*/
#endif /* RTMP_MAC_PCI // */

	/* Free IRQ */
	if (RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_INTERRUPT_IN_USE)) {
#ifdef RTMP_MAC_PCI
		/* Deregister interrupt function */
		RtmpOSIRQRelease(net_dev);
#endif /* RTMP_MAC_PCI // */
		RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_INTERRUPT_IN_USE);
	}
	/* Free Ring or USB buffers */
	RTMPFreeTxRxRingMemory(pAd);

	RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_HALT_IN_PROGRESS);

	/* Free BA reorder resource */
	ba_reordering_resource_release(pAd);

	RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_START_UP);

/*+++Modify by woody to solve the bulk fail+++*/
	{
	}

	DBGPRINT(RT_DEBUG_TRACE, ("<=== rt28xx_close\n"));
	return 0;		/* close ok */
}				/* End of rt28xx_close */
Code example #16
int nvhost_gr3d_t30_read_reg(
	struct platform_device *dev,
	struct nvhost_channel *channel,
	struct nvhost_hwctx *hwctx,
	u32 offset,
	u32 *value)
{
	struct host1x_hwctx_handler *h = to_host1x_hwctx_handler(hwctx->h);
	u32 syncpt_incrs = 1;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	void *ref;
	void *read_waiter = NULL;
	struct nvhost_job *job;
	int err;
	struct mem_handle *mem = NULL;
	u32 *mem_ptr = NULL;
	u32 *cmdbuf_ptr = NULL;
	struct sg_table *mem_sgt = NULL;
	struct mem_mgr *memmgr = hwctx->memmgr;
	u32 opcodes[] = {
		/* Switch to 3D - set up output to memory */
		nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
		nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 1),
		nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_ADDRESS, 1),
		0xdeadbeef,
		/* Get host1x to request a register read */
		nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
				host1x_uclass_indoff_r(), 1),
		nvhost_class_host_indoff_reg_read(
				host1x_uclass_indoff_indmodid_gr3d_v(),
				offset, false),
		nvhost_opcode_imm(host1x_uclass_inddata_r(), 0),
		/* send reg reads back to host */
		nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
		nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 0),
		/* Finalize with syncpt increment */
		nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
				host1x_uclass_incr_syncpt_base_r(), 1),
		nvhost_class_host_incr_syncpt_base(h->h.waitbase,
				1),
		nvhost_opcode_imm_incr_syncpt(
				host1x_uclass_incr_syncpt_cond_immediate_v(),
				h->h.syncpt),
	};

	/* 12 slots for gather, and one slot for storing the result value */
	mem = nvhost_memmgr_alloc(memmgr, sizeof(opcodes)+4,
			32, mem_mgr_flag_uncacheable);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	mem_ptr = nvhost_memmgr_mmap(mem);
	if (!mem_ptr) {
		err = -ENOMEM;
		goto done;
	}
	cmdbuf_ptr = mem_ptr + 1;

	mem_sgt = nvhost_memmgr_pin(memmgr, mem, &channel->dev->dev);
	if (IS_ERR(mem_sgt)) {
		err = -ENOMEM;
		mem_sgt = NULL;
		goto done;
	}
	/* Set address of target memory slot to the stream */
	opcodes[3] = sg_dma_address(mem_sgt->sgl);

	read_waiter = nvhost_intr_alloc_waiter();
	if (!read_waiter) {
		err = -ENOMEM;
		goto done;
	}

	job = nvhost_job_alloc(channel, hwctx, 1, 0, 0, 1, memmgr);
	if (!job) {
		err = -ENOMEM;
		goto done;
	}

	job->hwctx_syncpt_idx = 0;
	job->sp->id = h->h.syncpt;
	job->sp->waitbase = h->h.waitbase;
	job->sp->incrs = syncpt_incrs;
	job->num_syncpts = 1;
	job->serialize = 1;
	memcpy(cmdbuf_ptr, opcodes, sizeof(opcodes));

	/* Submit job */
	nvhost_job_add_gather(job, nvhost_memmgr_handle_to_id(mem),
			ARRAY_SIZE(opcodes), 4);

	err = nvhost_job_pin(job, &nvhost_get_host(dev)->syncpt);
	if (err)
		goto done;

	err = nvhost_channel_submit(job);
	if (err)
		goto done;

	/* Wait for read to be ready */
	err = nvhost_intr_add_action(&nvhost_get_host(dev)->intr,
			h->h.syncpt, job->sp->fence,
			NVHOST_INTR_ACTION_WAKEUP, &wq,
			read_waiter,
			&ref);
	read_waiter = NULL;
	WARN(err, "Failed to set wakeup interrupt");
	wait_event(wq,
		nvhost_syncpt_is_expired(&nvhost_get_host(dev)->syncpt,
				h->h.syncpt, job->sp->fence));
	nvhost_job_put(job);
	job = NULL;
	nvhost_intr_put_ref(&nvhost_get_host(dev)->intr, h->h.syncpt,
			ref);

	*value = *mem_ptr;

done:
	kfree(read_waiter);
	if (mem_ptr)
		nvhost_memmgr_munmap(mem, mem_ptr);
	if (mem_sgt)
		nvhost_memmgr_unpin(memmgr, mem, &channel->dev->dev, mem_sgt);
	if (mem)
		nvhost_memmgr_put(memmgr, mem);
	return err;
}
Code example #17
/**
 * Main entrypoint for syncpoint value waits.
 */
int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
			u32 thresh, u32 timeout, u32 *value)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	void *ref;
	void *waiter;
	int err = 0, check_count = 0, low_timeout = 0;
	static int print_once = 0;

	if (value)
		*value = 0;

	BUG_ON(!syncpt_op(sp).update_min);
	if (!nvhost_syncpt_check_max(sp, id, thresh)) {
		dev_warn(&syncpt_to_dev(sp)->pdev->dev,
			"wait %d (%s) for (%d) wouldn't be met (max %d)\n",
			id, syncpt_op(sp).name(sp, id), thresh,
			nvhost_syncpt_read_max(sp, id));
		nvhost_debug_dump(syncpt_to_dev(sp));
		return -EINVAL;
	}

	/* first check cache */
	if (nvhost_syncpt_min_cmp(sp, id, thresh)) {
		if (value)
			*value = nvhost_syncpt_read_min(sp, id);
		return 0;
	}

	/* keep host alive */
	nvhost_module_busy(syncpt_to_dev(sp)->dev);

	if (client_managed(id) || !nvhost_syncpt_min_eq_max(sp, id)) {
		/* try to read from register */
		u32 val = syncpt_op(sp).update_min(sp, id);
		if ((s32)(val - thresh) >= 0) {
			if (value)
				*value = val;
			goto done;
		}
	}

	if (!timeout) {
		err = -EAGAIN;
		goto done;
	}

	/* schedule a wakeup when the syncpoint value is reached */
	waiter = nvhost_intr_alloc_waiter();
	if (!waiter) {
		err = -ENOMEM;
		goto done;
	}

	err = nvhost_intr_add_action(&(syncpt_to_dev(sp)->intr), id, thresh,
				NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE, &wq,
				waiter,
				&ref);
	if (err)
		goto done;

	err = -EAGAIN;
	/* wait for the syncpoint, or timeout, or signal */
	while (timeout) {
		u32 check = min_t(u32, SYNCPT_CHECK_PERIOD, timeout);
		int remain = wait_event_interruptible_timeout(wq,
						nvhost_syncpt_min_cmp(sp, id, thresh),
						check);
		if (remain > 0 || nvhost_syncpt_min_cmp(sp, id, thresh)) {
			if (value)
				*value = nvhost_syncpt_read_min(sp, id);
			err = 0;
			break;
		}
		if (remain < 0) {
			err = remain;
			break;
		}
		if (timeout != NVHOST_NO_TIMEOUT) {
			if (timeout < SYNCPT_CHECK_PERIOD) {
				/* Caller-specified timeout may be impractically low */
				low_timeout = timeout;
			}
			timeout -= check;
		}
		if (timeout) {
			dev_warn(&syncpt_to_dev(sp)->pdev->dev,
				"%s: syncpoint id %d (%s) stuck waiting %d, timeout=%d\n",
				 current->comm, id, syncpt_op(sp).name(sp, id),
				 thresh, timeout);
			syncpt_op(sp).debug(sp);
			print_once++;
			if (print_once == 1)
			{
				nvhost_debug_dump(syncpt_to_dev(sp));
				debug_stuck_syncpoint();
			}
			if (check_count > MAX_STUCK_CHECK_COUNT) {
				if (low_timeout) {
					dev_warn(&syncpt_to_dev(sp)->pdev->dev,
						"is timeout %d too low?\n",
						low_timeout);
				}
				nvhost_debug_dump(syncpt_to_dev(sp));
				BUG();
			}
			check_count++;
		}
	}
	nvhost_intr_put_ref(&(syncpt_to_dev(sp)->intr), ref);

done:
	nvhost_module_idle(syncpt_to_dev(sp)->dev);
	return err;
}
Code example #18
/**
 * Main entrypoint for syncpoint value waits.
 */
int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
			u32 thresh, u32 timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	void *ref;
	int err = 0;
	//struct nvhost_dev *dev = syncpt_to_dev(sp);

	BUG_ON(!check_max(sp, id, thresh));

	/* first check cache */
	if (nvhost_syncpt_min_cmp(sp, id, thresh))
		return 0;

	/* keep host alive */
	nvhost_module_busy(&syncpt_to_dev(sp)->mod);

	if (client_managed(id) || !nvhost_syncpt_min_eq_max(sp, id)) {
		/* try to read from register */
		u32 val = nvhost_syncpt_update_min(sp, id);
		if ((s32)(val - thresh) >= 0)
			goto done;
	}

	if (!timeout) {
		err = -EAGAIN;
		goto done;
	}

	/* schedule a wakeup when the syncpoint value is reached */
	err = nvhost_intr_add_action(&(syncpt_to_dev(sp)->intr), id, thresh,
				NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE, &wq, &ref);
	if (err)
		goto done;

	/* wait for the syncpoint, or timeout, or signal */
	while (timeout) {
		u32 check = min_t(u32, SYNCPT_CHECK_PERIOD, timeout);
		err = wait_event_interruptible_timeout(wq,
						nvhost_syncpt_min_cmp(sp, id, thresh),
						check);
		if (err != 0)
			break;
		if (timeout != NVHOST_NO_TIMEOUT)
			timeout -= SYNCPT_CHECK_PERIOD;
		if (timeout) {
			dev_warn(&syncpt_to_dev(sp)->pdev->dev,
				"syncpoint id %d (%s) stuck waiting %d  timeout=%d\n",
				id, nvhost_syncpt_name(id), thresh, timeout);
			/* A wait queue in the nvhost driver may run frequently
			   during early suspend/late resume. If these logs were
			   printed, early suspend/late resume could block and
			   trigger its watchdog, so the logging below is
			   disabled. */
			/*
			nvhost_syncpt_debug(sp);
			nvhost_channel_fifo_debug(dev);
			nvhost_sync_reg_dump(dev);
			*/
		}
	};
	if (err > 0)
		err = 0;
	else if (err == 0)
		err = -EAGAIN;
	nvhost_intr_put_ref(&(syncpt_to_dev(sp)->intr), ref);

done:
	nvhost_module_idle(&syncpt_to_dev(sp)->mod);
	return err;
}
Code example #19
int rt28xx_close(IN PNET_DEV dev)
{
	struct net_device * net_dev = (struct net_device *)dev;
    RTMP_ADAPTER	*pAd = net_dev->ml_priv;
	BOOLEAN 		Cancelled = FALSE;
	UINT32			i = 0;
#ifdef RT2870
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);

	
#endif 


    DBGPRINT(RT_DEBUG_TRACE, ("===> rt28xx_close\n"));

	
	if (pAd == NULL)
		return 0; 

	{
		
		
#ifdef RT2860
		if (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_DOZE) ||
			RTMP_SET_PSFLAG(pAd, fRTMP_PS_SET_PCI_CLK_OFF_COMMAND) ||
			RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_IDLE_RADIO_OFF))
#endif
#ifdef RT2870
		if (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_DOZE))
#endif
        {
#ifdef RT2860
		    AsicForceWakeup(pAd, RTMP_HALT);
#endif
#ifdef RT2870
		    AsicForceWakeup(pAd, TRUE);
#endif
        }

		if (INFRA_ON(pAd) &&
			(!RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_NIC_NOT_EXIST)))
		{
			MLME_DISASSOC_REQ_STRUCT	DisReq;
			MLME_QUEUE_ELEM *MsgElem = (MLME_QUEUE_ELEM *) kmalloc(sizeof(MLME_QUEUE_ELEM), MEM_ALLOC_FLAG);

			COPY_MAC_ADDR(DisReq.Addr, pAd->CommonCfg.Bssid);
			DisReq.Reason =  REASON_DEAUTH_STA_LEAVING;

			MsgElem->Machine = ASSOC_STATE_MACHINE;
			MsgElem->MsgType = MT2_MLME_DISASSOC_REQ;
			MsgElem->MsgLen = sizeof(MLME_DISASSOC_REQ_STRUCT);
			NdisMoveMemory(MsgElem->Msg, &DisReq, sizeof(MLME_DISASSOC_REQ_STRUCT));

			
			pAd->MlmeAux.AutoReconnectSsidLen= 32;
			NdisZeroMemory(pAd->MlmeAux.AutoReconnectSsid, pAd->MlmeAux.AutoReconnectSsidLen);

			pAd->Mlme.CntlMachine.CurrState = CNTL_WAIT_OID_DISASSOC;
			MlmeDisassocReqAction(pAd, MsgElem);
			kfree(MsgElem);

			RTMPusecDelay(1000);
		}

#ifdef RT2870
	RTMP_SET_FLAG(pAd, fRTMP_ADAPTER_REMOVE_IN_PROGRESS);
#endif 

#ifdef CCX_SUPPORT
		RTMPCancelTimer(&pAd->StaCfg.LeapAuthTimer, &Cancelled);
#endif

		RTMPCancelTimer(&pAd->StaCfg.StaQuickResponeForRateUpTimer, &Cancelled);
		RTMPCancelTimer(&pAd->StaCfg.WpaDisassocAndBlockAssocTimer, &Cancelled);

		MlmeRadioOff(pAd);
#ifdef RT2860
		pAd->bPCIclkOff = FALSE;
#endif
	}

	RTMP_SET_FLAG(pAd, fRTMP_ADAPTER_HALT_IN_PROGRESS);

	for (i = 0 ; i < NUM_OF_TX_RING; i++)
	{
		while (pAd->DeQueueRunning[i] == TRUE)
		{
			printk("Waiting for TxQueue[%d] done..........\n", i);
			RTMPusecDelay(1000);
		}
	}

#ifdef RT2870
	
	add_wait_queue (&unlink_wakeup, &wait);
	pAd->wait = &unlink_wakeup;

	
	i = 0;
	
	while(i < 25)
	{
		unsigned long IrqFlags;

		RTMP_IRQ_LOCK(&pAd->BulkInLock, IrqFlags);
		if (pAd->PendingRx == 0)
		{
			RTMP_IRQ_UNLOCK(&pAd->BulkInLock, IrqFlags);
			break;
		}
		RTMP_IRQ_UNLOCK(&pAd->BulkInLock, IrqFlags);

		msleep(UNLINK_TIMEOUT_MS);	
		i++;
	}
	pAd->wait = NULL;
	remove_wait_queue (&unlink_wakeup, &wait);
#endif 

#ifdef RT2870
	
	RT2870_TimerQ_Exit(pAd);
	
	RT28xxThreadTerminate(pAd);
#endif 

	
	MlmeHalt(pAd);

	
	kill_thread_task(pAd);

	MacTableReset(pAd);

	MeasureReqTabExit(pAd);
	TpcReqTabExit(pAd);

#ifdef RT2860
	if (RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_INTERRUPT_ACTIVE))
	{
		NICDisableInterrupt(pAd);
	}

	
	NICIssueReset(pAd);

	
	if (RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_INTERRUPT_IN_USE))
	{
		
		RT28XX_IRQ_RELEASE(net_dev)
		RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_INTERRUPT_IN_USE);
	}
#endif

	
	RTMPFreeTxRxRingMemory(pAd);

	RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_HALT_IN_PROGRESS);

	
	ba_reordering_resource_release(pAd);

	RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_START_UP);

	return 0; 
}