Example #1
irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status, masked_status;

	spin_lock(&dev_priv->irq_lock);
	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	masked_status = status & dev_priv->irq_mask;
	spin_unlock(&dev_priv->irq_lock);

	if (likely(status))
		outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);

	if (!masked_status)
		return IRQ_NONE;

	if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
			     SVGA_IRQFLAG_FENCE_GOAL)) {
		vmw_fences_update(dev_priv->fman);
		wake_up_all(&dev_priv->fence_queue);
	}

	if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
		wake_up_all(&dev_priv->fifo_queue);

	return IRQ_HANDLED;
}
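Every wake_up_all() here has a sleeper on the other end. A minimal waiter-side sketch for the FIFO queue above, assuming a hypothetical vmw_fifo_is_idle() condition helper:

/* Waiter-side sketch: sleeps until vmw_irq_handler() signals FIFO
 * progress.  vmw_fifo_is_idle() is a hypothetical condition helper. */
static long vmw_wait_fifo(struct vmw_private *dev_priv)
{
	return wait_event_timeout(dev_priv->fifo_queue,
				  vmw_fifo_is_idle(dev_priv),
				  msecs_to_jiffies(100));
}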
Example #2
static void cifs_umount_begin(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb_master_tcon(cifs_sb);

	spin_lock(&cifs_tcp_ses_lock);
	if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) {
		/* we have other mounts to same share or we have
		   already tried to force umount this and woken up
		   all waiting network requests, nothing to do */
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	} else if (tcon->tc_count == 1)
		tcon->tidStatus = CifsExiting;
	spin_unlock(&cifs_tcp_ses_lock);

	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}

	return;
}
Example #3
// This function is called when a /dev/osprdX file is finally closed.
// (If the file descriptor was dup2ed, this function is called only when the
// last copy is closed.)
static int osprd_close_last(struct inode *inode, struct file *filp)
{
	if (filp) {
		osprd_info_t *d = file2osprd(filp);
		int filp_writable = filp->f_mode & FMODE_WRITE;

		// EXERCISE: If the user closes a ramdisk file that holds
		// a lock, release the lock.  Also wake up blocked processes
		// as appropriate.

		// Your code here.

		if (!(filp->f_flags & F_OSPRD_LOCKED))
			return 0;

		osp_spin_lock(&d->mutex);

		if (filp_writable) {
			d->write_lock_count--;
			d->current_write_pid = -1;
			wake_up_all(&d->blockq);
		} else {
			process_queue_t prev = NULL;
			process_queue_t curr = d->read_queue;

			d->read_lock_count--;

			// Unlink this process's entry from the reader queue.
			while (curr != NULL) {
				if (curr->pid == current->pid) {
					if (prev == NULL)
						d->read_queue = curr->next;
					else
						prev->next = curr->next;
					break;
				}
				prev = curr;
				curr = curr->next;
			}

			wake_up_all(&d->blockq);
		}
		filp->f_flags &= ~F_OSPRD_LOCKED;
		osp_spin_unlock(&d->mutex);

		// This line avoids compiler warnings; you may remove it.
		(void) filp_writable, (void) d;

	}

	return 0;
}
Example #4
asmlinkage long ManageMailbox(bool stop, int *count){
	struct Mailbox* self;
	struct list* hashLink;
	spin_lock_irq(&creationLock);
	if ((self = HashFind(current->tgid)) == NULL){
		//Allocate and initialize the mailbox for the receiver
		printk(KERN_INFO "Mailbox created via mng for %d \n", current->tgid);
		self = kmem_cache_alloc(mailboxCache, GFP_ATOMIC); /* may not sleep under creationLock */
		if (self == NULL) {
			spin_unlock_irq(&creationLock);
			return -ENOMEM;
		}
		self->owner = current->tgid;
		self->numberofMessages = 0;
		self->status = false;
		self->message = NULL;
		atomic_set(&self->references, 0);
		self->waitingFull = 0;
		self->waitingEmpty = 0;
		init_waitqueue_head(&self->canExit);
		spin_lock_init(&self->lock);
		init_waitqueue_head(&self->notEmpty);
		init_waitqueue_head(&self->notFull);
		//Allocate and initialize the hash link for the receiver
		hashLink = kmem_cache_alloc(listCache, GFP_ATOMIC); /* still under creationLock */
		if (hashLink == NULL) {
			kmem_cache_free(mailboxCache, self);
			spin_unlock_irq(&creationLock);
			return -ENOMEM;
		}
		hashLink->mailbox = self;
		hashLink->pid = current->tgid;
		hashLink->next = NULL;
		HashAdd(current->tgid, hashLink);
	}	
	atomic_add(1, &self->references);
	spin_unlock_irq(&creationLock);
	// If the count pointer isn't null, copy the number of messages to
	// user space.  copy_to_user() may sleep, so snapshot the count under
	// the lock and do the copy with the lock released.
	if (count != NULL){
		int messages;

		spin_lock_irq(&self->lock);
		messages = self->numberofMessages;
		spin_unlock_irq(&self->lock);
		if (copy_to_user(count, &messages, sizeof(int))){
			atomic_sub(1, &self->references);
			wake_up_all(&self->canExit);
			return MSG_ARG_ERROR;
		}
	}
	spin_lock_irq(&self->lock);
	// If stop is set to true, need to wake up all the waiting processes so that they can return
	if (stop == true){
		self->status = stop;
		wake_up_all(&self->notFull);
		wake_up_all(&self->notEmpty);
	}
	atomic_sub(1, &self->references);
	wake_up_all(&self->canExit);
	spin_unlock_irq(&self->lock);
	return 0;
} 
Example #5
static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd)
{
	struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;
	unsigned long flags;

	BUG_ON(icd != pcdev->icd);

	/* disable capture, disable interrupts */
	ceu_write(pcdev, CEIER, 0);
	ceu_write(pcdev, CAPSR, 1 << 16);

	/* make sure active buffer is cancelled */
	spin_lock_irqsave(&pcdev->lock, flags);
	if (pcdev->active) {
		list_del(&pcdev->active->queue);
		pcdev->active->state = VIDEOBUF_ERROR;
		wake_up_all(&pcdev->active->done);
		pcdev->active = NULL;
	}
	spin_unlock_irqrestore(&pcdev->lock, flags);

	pm_runtime_put_sync(ici->v4l2_dev.dev);

	dev_info(icd->dev.parent,
		 "SuperH Mobile CEU driver detached from camera %d\n",
		 icd->devnum);

	pcdev->icd = NULL;
}
Example #6
static void seq_fid_alloc_fini(struct lu_client_seq *seq, __u64 seqnr,
			       bool whole)
{
	LASSERT(seq->lcs_update == 1);

	mutex_lock(&seq->lcs_mutex);
	if (seqnr != 0) {
		CDEBUG(D_INFO, "%s: New sequence [0x%16.16llx]\n",
		       seq->lcs_name, seqnr);

		seq->lcs_fid.f_seq = seqnr;
		if (whole) {
			/* The caller requires the whole sequence,
			 * so mark this one as fully used */
			if (seq->lcs_type == LUSTRE_SEQ_METADATA)
				seq->lcs_fid.f_oid =
					LUSTRE_METADATA_SEQ_MAX_WIDTH;
			else
				seq->lcs_fid.f_oid = LUSTRE_DATA_SEQ_MAX_WIDTH;
		} else {
			seq->lcs_fid.f_oid = LUSTRE_FID_INIT_OID;
		}
		seq->lcs_fid.f_ver = 0;
	}

	--seq->lcs_update;
	wake_up_all(&seq->lcs_waitq);
}
Example #7
static int remove_queued_job(int id)
{
    struct queue *q = NULL;
    int err = -ENXIO;

    if (id < 0)
        return -EINVAL;

    mutex_lock(&qmutex);
    INFO("removing job[%d]", id);

    if (qlen > 0) {
        q = remove_job(id);
        if (q) {
            qlen--;
            destroy_job(q->job);
            kfree(q);
            err = 0;
            INFO("job [%d] removed", id);
            wake_up_all(&pwq);
        }
    }

    mutex_unlock(&qmutex);

    return err;
}
Example #8
// This function is called when a /dev/osprdX file is finally closed.
// (If the file descriptor was dup2ed, this function is called only when the
// last copy is closed.)
static int osprd_close_last(struct inode *inode, struct file *filp)
{

	if (filp) {
		osprd_info_t *d = file2osprd(filp);
		int filp_writable = filp->f_mode & FMODE_WRITE;

		// EXERCISE: If the user closes a ramdisk file that holds
		// a lock, release the lock.  Also wake up blocked processes
		// as appropriate.

		// Your code here.
		osp_spin_lock(&d->mutex);

		if (filp->f_flags & F_OSPRD_LOCKED) {
			if (filp_writable != 0)
				d->write_locks--;
			else
				d->read_locks--;

			wake_up_all(&d->blockq);
			filp->f_flags &= ~F_OSPRD_LOCKED;	/* bitwise NOT, not logical */
		}
		osp_spin_unlock(&d->mutex);

		// This line avoids compiler warnings; you may remove it.
		// (void) filp_writable, (void) d;

	}

	return 0;
}
Example #9
/**
 * exit_oom_victim - note the exit of an OOM victim
 */
void exit_oom_victim(void)
{
	clear_thread_flag(TIF_MEMDIE);

	if (!atomic_dec_return(&oom_victims))
		wake_up_all(&oom_victims_wait);
}
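The counterpart blocks until the victim count drains to zero; a minimal waiter-side sketch against the same names:

/* Waiter-side sketch: sleep until every OOM victim has called
 * exit_oom_victim() above. */
static void wait_for_oom_victims(void)
{
	wait_event(oom_victims_wait, !atomic_read(&oom_victims));
}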
Example #10
File: vhost.c Project: 7799/linux
static int vhost_worker(void *data)
{
	struct vhost_dev *dev = data;
	struct vhost_work *work = NULL;
	unsigned uninitialized_var(seq);
	mm_segment_t oldfs = get_fs();

	set_fs(USER_DS);
	use_mm(dev->mm);

	for (;;) {
		/* mb paired w/ kthread_stop */
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irq(&dev->work_lock);
		if (work) {
			work->done_seq = seq;
			if (work->flushing)
				wake_up_all(&work->done);
		}

		if (kthread_should_stop()) {
			spin_unlock_irq(&dev->work_lock);
			__set_current_state(TASK_RUNNING);
			break;
		}
		if (!list_empty(&dev->work_list)) {
			work = list_first_entry(&dev->work_list,
						struct vhost_work, node);
			list_del_init(&work->node);
			seq = work->queue_seq;
		} else
			work = NULL;
		spin_unlock_irq(&dev->work_lock);

		if (work) {
			__set_current_state(TASK_RUNNING);
			work->fn(work);
			if (need_resched())
				schedule();
		} else
			schedule();
	}
	unuse_mm(dev->mm);
	set_fs(oldfs);
	return 0;
}
Example #11
/**
 * __vb2_queue_cancel() - cancel and stop (pause) streaming
 *
 * Removes all queued buffers from driver's queue and all buffers queued by
 * userspace from videobuf's queue. Returns to state after reqbufs.
 */
static void __vb2_queue_cancel(struct vb2_queue *q)
{
	unsigned int i;

	/*
	 * Tell driver to stop all transactions and release all queued
	 * buffers.
	 */
	if (q->streaming)
		call_qop(q, stop_streaming, q);
	q->streaming = 0;

	/*
	 * Remove all buffers from videobuf's list...
	 */
	INIT_LIST_HEAD(&q->queued_list);
	/*
	 * ...and done list; userspace will not receive any buffers it
	 * has not already dequeued before initiating cancel.
	 */
	INIT_LIST_HEAD(&q->done_list);
	atomic_set(&q->queued_count, 0);
	wake_up_all(&q->done_wq);

	/*
	 * Reinitialize all buffers for next use.
	 */
	for (i = 0; i < q->num_buffers; ++i)
		q->bufs[i]->state = VB2_BUF_STATE_DEQUEUED;
}
Example #12
/*
 * check a range of space and convert unwritten extents to written.
 *
 * Called with inode->i_mutex; we depend on this when we manipulate
 * io->flag, since we could otherwise race with ext4_flush_completed_IO()
 */
int ext4_end_io_nolock(ext4_io_end_t *io)
{
	struct inode *inode = io->inode;
	loff_t offset = io->offset;
	ssize_t size = io->size;
	int ret = 0;

	ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
		   "list->prev 0x%p\n",
		   io, inode->i_ino, io->list.next, io->list.prev);

	ret = ext4_convert_unwritten_extents(inode, offset, size);
	if (ret < 0) {
		ext4_msg(inode->i_sb, KERN_EMERG,
			 "failed to convert unwritten extents to written "
			 "extents -- potential data loss!  "
			 "(inode %lu, offset %llu, size %zd, error %d)",
			 inode->i_ino, offset, size, ret);
	}

	/* Wake up anyone waiting on unwritten extent conversion */
	if (atomic_dec_and_test(&EXT4_I(inode)->i_aiodio_unwritten))
		wake_up_all(ext4_ioend_wq(io->inode));
	if (io->flag & EXT4_IO_END_DIRECT)
		inode_dio_done(inode);
	if (io->iocb)
		aio_complete(io->iocb, io->result, 0);
	return ret;
}
Example #13
void wcd9xxx_spmi_unlock_sleep(void)
{
	mutex_lock(&map.pm_lock);
	if (--map.wlock_holders == 0) {
		pr_debug("%s: releasing wake lock pm_state %d -> %d\n",
			 __func__, map.pm_state, WCD9XXX_PM_SLEEPABLE);
		/*
		 * if wcd9xxx_spmi_lock_sleep failed, pm_state would be still
		 * WCD9XXX_PM_ASLEEP, don't overwrite
		 */
		if (likely(map.pm_state == WCD9XXX_PM_AWAKE))
			map.pm_state = WCD9XXX_PM_SLEEPABLE;
		pm_qos_update_request(&map.pm_qos_req,
				PM_QOS_DEFAULT_VALUE);
		#ifdef VENDOR_EDIT
		//John.Xu@PhoneSw.AudioDriver, 2015/03/19, Add for Qcom patch,
		//Headset sometime not detected when phone is sleep
		pm_relax(&map.spmi[0]->dev);
		#endif /* VENDOR_EDIT */
	}
	mutex_unlock(&map.pm_lock);
	pr_debug("%s: wake lock counter %d\n", __func__,
			map.wlock_holders);
	pr_debug("%s: map.pm_state = %d\n", __func__, map.pm_state);
	wake_up_all(&map.pm_wq);
}
Example #14
RTDECL(int) RTSemEventMultiSignal(RTSEMEVENTMULTI hEventMultiSem)
{
    uint32_t fNew;
    uint32_t fOld;

    /*
     * Validate input.
     */
    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
    if (!pThis)
        return VERR_INVALID_PARAMETER;
    AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
    rtR0SemEventMultiLnxRetain(pThis);

    /*
     * Signal the event object.  The paranoia here is to deal with racing
     * RTSemEventMultiSignal calls (which should probably be forbidden, but
     * are relatively easy to handle).
     */
    do
    {
        fNew = fOld = ASMAtomicUoReadU32(&pThis->fStateAndGen);
        fNew += 1 << RTSEMEVENTMULTILNX_GEN_SHIFT;
        fNew |= RTSEMEVENTMULTILNX_STATE_MASK;
    }
    while (!ASMAtomicCmpXchgU32(&pThis->fStateAndGen, fNew, fOld));

    wake_up_all(&pThis->Head);

    rtR0SemEventMultiLnxRelease(pThis);
    return VINF_SUCCESS;
}
Example #15
bool wcd9xxx_lock_sleep(
	struct wcd9xxx_core_resource *wcd9xxx_res)
{
	enum wcd9xxx_pm_state os;

	mutex_lock(&wcd9xxx_res->pm_lock);
	if (wcd9xxx_res->wlock_holders++ == 0) {
		pr_debug("%s: holding wake lock\n", __func__);
		pm_qos_update_request(&wcd9xxx_res->pm_qos_req,
				      msm_cpuidle_get_deep_idle_latency());
	}
	mutex_unlock(&wcd9xxx_res->pm_lock);

	if (!wait_event_timeout(wcd9xxx_res->pm_wq,
				((os =  wcd9xxx_pm_cmpxchg(wcd9xxx_res,
						  WCD9XXX_PM_SLEEPABLE,
						  WCD9XXX_PM_AWAKE)) ==
							WCD9XXX_PM_SLEEPABLE ||
					(os == WCD9XXX_PM_AWAKE)),
				msecs_to_jiffies(
					WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS))) {
		pr_warn("%s: system didn't resume within %dms, s %d, w %d\n",
			__func__,
			WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS, wcd9xxx_res->pm_state,
			wcd9xxx_res->wlock_holders);
		wcd9xxx_unlock_sleep(wcd9xxx_res);
		return false;
	}
	wake_up_all(&wcd9xxx_res->pm_wq);
	return true;
}
Example #16
/* Locking: Caller holds q->vb_lock */
void videobuf_queue_cancel(struct videobuf_queue *q)
{
	unsigned long flags = 0;
	int i;

	q->streaming = 0;
	q->reading  = 0;
	wake_up_interruptible_sync(&q->wait);

	/* remove queued buffers from list */
	spin_lock_irqsave(q->irqlock, flags);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
			list_del(&q->bufs[i]->queue);
			q->bufs[i]->state = VIDEOBUF_ERROR;
			wake_up_all(&q->bufs[i]->done);
		}
	}
	spin_unlock_irqrestore(q->irqlock, flags);

	/* free all buffers + clear queue */
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		q->ops->buf_release(q, q->bufs[i]);
	}
	INIT_LIST_HEAD(&q->stream);
}
Example #17
static void destroy_global(void)
{
    struct queue *q;
    int i;

    INFO("destorying...");

    mutex_lock(&qmutex);

    should_stop = true;
    wake_up_all(&pwq); /* wake up all producers */

    /* wake up and stop all consumers */
    for (i = 0; i < num_consumer; i++)
        kthread_stop(cthreads[i]);

    while (head) {
        q = remove_first_job();
        qlen--;
        destroy_job(q->job);
        kfree(q);
    }

    kfree(cthreads);

    mutex_unlock(&qmutex);
}
Example #18
static void fence_check_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct sync_file *sync_file;

	sync_file = container_of(cb, struct sync_file, cb);

	wake_up_all(&sync_file->wq);
}
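The callback above only runs once it has been attached to a fence. A hedged sketch of that registration step, using the standard dma_fence_add_callback() API (the caller context here is assumed):

/* Registration sketch: arm fence_check_cb_func() on the file's fence.
 * Returns 0 when armed, -ENOENT if the fence has already signaled
 * (in which case no wakeup will ever come from this callback). */
static int sync_file_arm_cb(struct sync_file *sync_file)
{
	return dma_fence_add_callback(sync_file->fence, &sync_file->cb,
				      fence_check_cb_func);
}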
Example #19
static void global_wait_callback(struct kgsl_device *device,
		struct kgsl_context *context, void *priv, int result)
{
	struct adreno_context *drawctxt = priv;

	wake_up_all(&drawctxt->waiting);
	kgsl_context_put(&drawctxt->base);
}
Example #20
static inline void ncp_finish_request(struct ncp_server *server, struct ncp_request_reply *req, int result)
{
	req->result = result;
	if (req->status != RQ_ABANDONED)
		memcpy(req->reply_buf, server->rxbuf, req->datalen);
	req->status = RQ_DONE;
	wake_up_all(&req->wq);
	ncp_req_put(req);
}
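A waiter pairing with this completion would sleep on req->wq until the status flips; a minimal sketch, assuming the RQ_* states shown above:

/* Hypothetical waiter: block until ncp_finish_request() marks the
 * request done, then report its result. */
static int ncp_wait_for_reply(struct ncp_request_reply *req)
{
	wait_event(req->wq, req->status == RQ_DONE);
	return req->result;
}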
Example #21
static inline void enc_pools_wakeup(void)
{
	assert_spin_locked(&page_pools.epp_lock);

	if (unlikely(page_pools.epp_waitqlen)) {
		LASSERT(waitqueue_active(&page_pools.epp_waitq));
		wake_up_all(&page_pools.epp_waitq);
	}
}
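The waitqueue_active() test skips the wakeup's internal locking when nobody is waiting, but it is only safe if something orders the condition store against the waiter check (here the epp_lock asserted above provides that ordering). A standalone sketch of the same guarded-wakeup pattern, using an explicit barrier instead of a lock:

static int done;
static DECLARE_WAIT_QUEUE_HEAD(wq);

static void signal_done(void)
{
	done = 1;
	smp_mb();			/* order the store before the waiter check */
	if (waitqueue_active(&wq))	/* cheap fast path when no one waits */
		wake_up_all(&wq);
}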
Example #22
static void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end)
{
	struct inode *inode = io_end->inode;

	io_end->flag &= ~EXT4_IO_END_UNWRITTEN;
	/* Wake up anyone waiting on unwritten extent conversion */
	if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
		wake_up_all(ext4_ioend_wq(inode));
}
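On the waiting side, ext4 sleeps until the unwritten count reaches zero; a sketch along the lines of ext4's own wait helper:

/* Waiter-side sketch: block until all pending conversions complete. */
static void ext4_wait_unwritten(struct inode *inode)
{
	wait_event(*ext4_ioend_wq(inode),
		   atomic_read(&EXT4_I(inode)->i_unwritten) == 0);
}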
Example #23
/*
 * The monitor responds with mount ack indicate mount success.  The
 * included client ticket allows the client to talk to MDSs and OSDs.
 */
static void ceph_monc_handle_map(struct ceph_mon_client *monc,
                                 struct ceph_msg *msg)
{
    struct ceph_client *client = monc->client;
    struct ceph_monmap *monmap = NULL, *old = monc->monmap;
    void *p, *end;
    int had_debugfs_info, init_debugfs = 0;

    mutex_lock(&monc->mutex);

    had_debugfs_info = have_debugfs_info(monc);

    dout("handle_monmap\n");
    p = msg->front.iov_base;
    end = p + msg->front.iov_len;

    monmap = ceph_monmap_decode(p, end);
    if (IS_ERR(monmap)) {
        pr_err("problem decoding monmap, %d\n",
               (int)PTR_ERR(monmap));
        goto out;
    }

    if (ceph_check_fsid(monc->client, &monmap->fsid) < 0) {
        kfree(monmap);
        goto out;
    }

    client->monc.monmap = monmap;
    kfree(old);

    if (!client->have_fsid) {
        client->have_fsid = true;
        if (!had_debugfs_info && have_debugfs_info(monc)) {
            pr_info("client%lld fsid %pU\n",
                    ceph_client_id(monc->client),
                    &monc->client->fsid);
            init_debugfs = 1;
        }
        mutex_unlock(&monc->mutex);

        if (init_debugfs) {
            /*
             * do debugfs initialization without mutex to avoid
             * creating a locking dependency
             */
            ceph_debugfs_client_init(monc->client);
        }

        goto out_unlocked;
    }
out:
    mutex_unlock(&monc->mutex);
out_unlocked:
    wake_up_all(&client->auth_wq);
}
Example #24
static void slow_work_done_thread_processing(int id, struct slow_work *work)
{
    struct module *module = slow_work_thread_processing[id];

    slow_work_thread_processing[id] = NULL;
    smp_mb();
    if (slow_work_unreg_work_item == work ||
            slow_work_unreg_module == module)
        wake_up_all(&slow_work_unreg_wq);
}
Example #25
void i2400mu_rx_kick(struct i2400mu *i2400mu)
{
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = &i2400mu->usb_iface->dev;

	d_fnstart(3, dev, "(i2400mu %p)\n", i2400m);
	atomic_inc(&i2400mu->rx_pending_count);
	wake_up_all(&i2400mu->rx_wq);
	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
}
Example #26
// This function is called when a /dev/osprdX file is finally closed.
// (If the file descriptor was dup2ed, this function is called only when the
// last copy is closed.)
static int osprd_close_last(struct inode *inode, struct file *filp)
{
	if (filp) {
		osprd_info_t *d = file2osprd(filp);
		int filp_writable = filp->f_mode & FMODE_WRITE;

		// EXERCISE: If the user closes a ramdisk file that holds
		// a lock, release the lock.  Also wake up blocked processes
		// as appropriate.

    printk("attempting to close and release\n");
		if ((filp->f_flags & F_OSPRD_LOCKED) != 0)
		{
		  printk("inside filp flag\n");
		  osp_spin_lock(&d->mutex);
		  filp->f_flags &= !F_OSPRD_LOCKED;
		  if (filp_writable)
		  {
		    d->num_write--;
		    d->write_pid = -1;
		  }
		  else
		  {
		    d->num_read--;
		    pid_list_t curr = d->read_pids;
		    // If first on the list
		    if (current->pid == curr->pid)
		      d->read_pids = curr->next;
		    else
		    {
		      while (curr->next != NULL)
		      {
		        // Find pid to skip over and connect
		        if (current->pid == curr->next->pid)
		        {
		          curr->next = curr->next->next;
		          break;
		        }
		        else
		          curr = curr->next;
		      }
		    }
		  }
		  wake_up_all(&d->blockq);
		  osp_spin_unlock(&d->mutex);
		  
		}

		// This line avoids compiler warnings; you may remove it.
		(void) filp_writable, (void) d;

	}

	return 0;
}
Example #27
/**
 * adreno_drawctxt_invalidate() - Invalidate an adreno draw context
 * @device: Pointer to the KGSL device structure for the GPU
 * @context: Pointer to the KGSL context structure
 *
 * Invalidate the context and remove all queued commands and cancel any pending
 * waiters
 */
void adreno_drawctxt_invalidate(struct kgsl_device *device,
		struct kgsl_context *context)
{
	struct adreno_context *drawctxt = ADRENO_CONTEXT(context);

	trace_adreno_drawctxt_invalidate(drawctxt);

	spin_lock(&drawctxt->lock);
	drawctxt->state = ADRENO_CONTEXT_STATE_INVALID;

	/*
	 * set the timestamp to the last value since the context is invalidated
	 * and we want the pending events for this context to go away
	 */
	kgsl_sharedmem_writel(device, &device->memstore,
			KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
			drawctxt->timestamp);

	kgsl_sharedmem_writel(device, &device->memstore,
			KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp),
			drawctxt->timestamp);

	while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
		struct kgsl_cmdbatch *cmdbatch =
			drawctxt->cmdqueue[drawctxt->cmdqueue_head];

		drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) %
			ADRENO_CONTEXT_CMDQUEUE_SIZE;

		kgsl_cancel_events_timestamp(device, &context->events,
			cmdbatch->timestamp);

		kgsl_cmdbatch_destroy(cmdbatch);
	}

	spin_unlock(&drawctxt->lock);

	/* Give the bad news to everybody waiting around */
	wake_up_all(&drawctxt->waiting);
	wake_up_all(&drawctxt->wq);
}
Example #28
static void cleanup_device(osprd_info_t *d)
{
	wake_up_all(&d->blockq);
	if (d->gd) {
		del_gendisk(d->gd);
		put_disk(d->gd);
	}
	if (d->queue)
		blk_cleanup_queue(d->queue);
	if (d->data)
		vfree(d->data);
}
Example #29
static int
sh7722gfx_flush( struct file *filp,
                 fl_owner_t   id )
{
     if (jpeg_locked == current->pid) {
          jpeg_locked = 0;

          wake_up_all( &wait_jpeg_lock );
     }

     return 0;
}
Example #30
/*
 * This is the ioctl implementation.
 */
static long kern_unlocked_ioctl(struct file *fp, unsigned int cmd,
		unsigned long arg) {
	struct polldev *pd;
	pd = (struct polldev *)fp->private_data;
	PR_INFO("start %p", pd);
	switch (cmd) {
	case IOCTL_EPOLL_WAKE:
		PR_INFO("in WAKE");
		pd->state = POLLIN;
		wmb();
		wake_up_all(&pd->wq);
		return 0;
	case IOCTL_EPOLL_RESET:
		PR_INFO("in RESET");
		pd->state = 0;
		wmb();
		wake_up_all(&pd->wq);
		return 0;
	}
	return -ENOTTY;
}
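For these wakeups to reach an epoll or select() waiter, the driver's poll method must register the same waitqueue; a minimal sketch, assuming the struct polldev layout implied above:

/* Poll-side sketch: epoll/select sleeps here until the ioctl above
 * sets pd->state and calls wake_up_all(). */
static unsigned int kern_poll(struct file *fp, poll_table *wait)
{
	struct polldev *pd = (struct polldev *)fp->private_data;

	poll_wait(fp, &pd->wq, wait);
	rmb();		/* pairs with the wmb() in the ioctl handler */
	if (pd->state & POLLIN)
		return POLLIN | POLLRDNORM;
	return 0;
}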