/*
 * poll() handler for the sec_nfc misc device.
 *
 * Registers the caller on read_wait and reports POLLIN|POLLRDNORM when
 * an NFC interrupt is pending.  info->mutex guards the device state,
 * read_mutex guards read_irq.
 *
 * Fix: a poll method must return a POLL* event mask, never a negative
 * errno — the return type is unsigned, so -ENODEV would be seen by the
 * VFS as an (almost) all-bits-set mask.  Report POLLERR instead when
 * the device is off.
 */
static unsigned int sec_nfc_poll(struct file *file, poll_table *wait)
{
	struct sec_nfc_info *info = container_of(file->private_data,
						struct sec_nfc_info, miscdev);
	enum sec_nfc_irq irq;

	unsigned int ret = 0;

	dev_dbg(info->dev, "%s: info: %p\n", __func__, info);

	mutex_lock(&info->mutex);

	if (info->state == SEC_NFC_ST_OFF) {
		dev_err(info->dev, "sec_nfc is not enabled\n");
		/* was -ENODEV: poll handlers return a mask, not -errno */
		ret = POLLERR;
		goto out;
	}

	poll_wait(file, &info->read_wait, wait);

	mutex_lock(&info->read_mutex);
	irq = info->read_irq;
	if (irq == SEC_NFC_INT)
		ret = (POLLIN | POLLRDNORM);
	mutex_unlock(&info->read_mutex);

out:
	mutex_unlock(&info->mutex);

	return ret;
}
Example #2
0
/*
 * Blocking read reporting the QMI context state.
 *
 * Sleeps until ctxt->state_dirty is set, formats the state into a local
 * 256-byte buffer, and copies at most @count bytes to userspace.
 * Returns the number of bytes copied, -ERESTARTSYS if interrupted while
 * waiting, or -EFAULT on a failed copy_to_user().
 *
 * NOTE(review): state_dirty is tested without the lock inside
 * wait_event_interruptible(); that is the usual wait_event pattern and
 * the flag is re-checked under ctxt->lock before being consumed.
 */
static ssize_t qmi_read(struct file *fp, char __user *buf,
                        size_t count, loff_t *pos)
{
    struct qmi_ctxt *ctxt = fp->private_data;
    char msg[256];
    int len;
    int r;

    mutex_lock(&ctxt->lock);
    for (;;) {
        if (ctxt->state_dirty) {
            ctxt->state_dirty = 0;
            /* len is always assigned before the break below */
            len = qmi_print_state(ctxt, msg, 256);
            break;
        }
        /* drop the lock while sleeping, retake it to re-check */
        mutex_unlock(&ctxt->lock);
        r = wait_event_interruptible(qmi_wait_queue, ctxt->state_dirty);
        if (r < 0)
            return r;
        mutex_lock(&ctxt->lock);
    }
    mutex_unlock(&ctxt->lock);

    /* truncate to the caller's buffer */
    if (len > count)
        len = count;

    if (copy_to_user(buf, msg, len))
        return -EFAULT;

    return len;
}
Example #3
0
/*
 * Finish the current sequence due to disconnect.
 * See mdc_import_event()
 */
void seq_client_flush(struct lu_client_seq *seq)
{
	wait_queue_t link;

	LASSERT(seq != NULL);
	init_waitqueue_entry(&link, current);
	mutex_lock(&seq->lcs_mutex);

	/* Wait for any in-flight sequence update to finish: register on
	 * the waitqueue, sleep uninterruptibly with the mutex dropped,
	 * then retake the mutex and re-check the flag. */
	while (seq->lcs_update) {
		add_wait_queue(&seq->lcs_waitq, &link);
		set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&seq->lcs_mutex);

		schedule();

		mutex_lock(&seq->lcs_mutex);
		remove_wait_queue(&seq->lcs_waitq, &link);
		set_current_state(TASK_RUNNING);
	}

        fid_zero(&seq->lcs_fid);
        /*
         * This index should not be used for sequence range allocation;
         * set it to -1 as a debug check.
         */

        seq->lcs_space.lsr_index = -1;

	lu_seq_range_init(&seq->lcs_space);
	mutex_unlock(&seq->lcs_mutex);
}
Example #4
0
/*
 * Close and forget every active ADM copp attached to @session_id.
 *
 * Returns -EINVAL for an out-of-range session id; otherwise returns the
 * rc of the last adm_close() attempt (0 if all succeeded — failures are
 * logged and the loop keeps going).
 *
 * NOTE(review): the hard-coded 1..8 range is presumably 1..MAX_SESSIONS-1
 * as iterated by msm_clear_all_session() — confirm the constants agree.
 */
int msm_clear_session_id(int session_id)
{
	int rc = 0;
	int i = 0;
	if (session_id < 1 || session_id > 8)
		return -EINVAL;
	pr_debug("%s: session[%d]\n", __func__, session_id);
	/* adm_mutex before copp_list_mutex: same order as
	 * msm_clear_all_session(), so no ABBA inversion. */
	mutex_lock(&routing_info.adm_mutex);
	mutex_lock(&routing_info.copp_list_mutex);
	for (i = 0; i < AFE_MAX_PORTS; i++) {
		if (routing_info.copp_list[session_id][i] != COPP_IGNORE) {
			rc = adm_close(routing_info.copp_list[session_id][i]);
			if (rc < 0) {
				pr_err("%s: adm close fail port[%d] rc[%d]\n",
					__func__,
					routing_info.copp_list[session_id][i],
					rc);
				continue;
			}
#ifdef CONFIG_MSM8X60_RTAC
			rtac_remove_adm_device(
			routing_info.copp_list[session_id][i], session_id);
#endif
			routing_info.copp_list[session_id][i] = COPP_IGNORE;
			rc = 0;
		}
	}
	mutex_unlock(&routing_info.copp_list_mutex);
	mutex_unlock(&routing_info.adm_mutex);

	return rc;
}
Example #5
0
/*
 * release() handler for the FIMC-IS ISP capture video node.
 *
 * On the last close of the node: stop the media pipeline if it was
 * streaming, release vb2 state, close the pipeline and drop the media
 * entity use count.  video_lock serializes against the other fops.
 */
static int isp_video_release(struct file *file)
{
	struct fimc_isp *isp = video_drvdata(file);
	struct fimc_is_video *ivc = &isp->video_capture;
	struct media_entity *entity = &ivc->ve.vdev.entity;
	struct media_device *mdev = entity->graph_obj.mdev;

	mutex_lock(&isp->video_lock);

	/* only the last open file handle tears the pipeline down */
	if (v4l2_fh_is_singular_file(file) && ivc->streaming) {
		media_entity_pipeline_stop(entity);
		ivc->streaming = 0;
	}

	vb2_fop_release(file);

	if (v4l2_fh_is_singular_file(file)) {
		fimc_pipeline_call(&ivc->ve, close);

		/* entity use counting is protected by the graph mutex */
		mutex_lock(&mdev->graph_mutex);
		entity->use_count--;
		mutex_unlock(&mdev->graph_mutex);
	}

	pm_runtime_put(&isp->pdev->dev);
	mutex_unlock(&isp->video_lock);

	return 0;
}
/*
 * Drop one IOMMU mapping reference for @handle in the given
 * (@domain_num, @partition_num) pair.
 *
 * Locks client->lock then buffer->lock, in that order.  If the buffer
 * was never mapped in that domain/partition, WARNs and does nothing.
 * The mapping itself is destroyed by ion_iommu_release() once the
 * kref drops to zero.
 */
void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num)
{
	struct ion_iommu_map *iommu_map;
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;

	mutex_lock(&buffer->lock);

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);

	if (!iommu_map) {
		WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
				domain_num, partition_num, buffer);
		goto out;
	}

	/* decrement the buffer/handle map counts, then the kref */
	_ion_unmap(&buffer->iommu_map_cnt, &handle->iommu_map_cnt);
	kref_put(&iommu_map->ref, ion_iommu_release);

out:
	mutex_unlock(&buffer->lock);

	mutex_unlock(&client->lock);

}
Example #7
0
/*
 * atfork "prepare" handler: acquire the arena list lock and every arena
 * mutex so fork() cannot observe malloc state mid-update.  Re-entrant:
 * if this thread already holds list_lock (recursive fork from a hook),
 * only the recursion counter is bumped.
 */
static void
ptmalloc_lock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (mutex_trylock(&list_lock))
    {
      void *my_arena;
      tsd_getspecific(arena_key, my_arena);
      if (my_arena == ATFORK_ARENA_PTR)
	/* This is the same thread which already locks the global list.
	   Just bump the counter.  */
	goto out;

      /* This thread has to wait its turn.  */
      (void)mutex_lock(&list_lock);
    }
  /* lock every arena in the circular list, starting at main_arena */
  for(ar_ptr = &main_arena;;) {
    (void)mutex_lock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  /* redirect malloc/free to the atfork stubs, remembering the real
     hooks so the unlock path can restore them */
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now. */
  tsd_getspecific(arena_key, save_arena);
  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
 out:
  ++atfork_recursive_cntr;
}
static ssize_t proximity_state_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct cm36686_data *cm36686 = dev_get_drvdata(dev);
	u16 ps_data;

	mutex_lock(&cm36686->power_lock);
	if (!(cm36686->power_state & PROXIMITY_ENABLED)) {
#if defined(CONFIG_SENSORS_CM36686_LEDA_EN_GPIO)
		cm36686_leden_gpio_onoff(cm36686, 1);
#else
	prox_led_onoff(cm36686, 1);
#endif
		cm36686_i2c_write_word(cm36686, REG_PS_CONF1,
			ps_reg_init_setting[PS_CONF1][CMD]);
	}

	mutex_lock(&cm36686->read_lock);
	cm36686_i2c_read_word(cm36686, REG_PS_DATA,
		&ps_data);
	mutex_unlock(&cm36686->read_lock);

	if (!(cm36686->power_state & PROXIMITY_ENABLED)) {
		cm36686_i2c_write_word(cm36686, REG_PS_CONF1,
				0x0001);
#if defined(CONFIG_SENSORS_CM36686_LEDA_EN_GPIO)
		cm36686_leden_gpio_onoff(cm36686, 0);
#else
	prox_led_onoff(cm36686, 0);
#endif
	}
	mutex_unlock(&cm36686->power_lock);

	return sprintf(buf, "%u\n", ps_data);
}
Example #9
0
/*
 * Allocate and register @cnt irq descriptors for irqs [start, start+cnt).
 *
 * Returns @start on success.  On allocation failure, frees the
 * descriptors created so far, clears the whole reserved range in the
 * allocated_irqs bitmap, and returns -ENOMEM.
 */
static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       struct module *owner)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node, owner);
		if (!desc)
			goto err;
		/* sparse_irq_lock protects the descriptor tree insertions */
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	/* roll back the descriptors allocated before the failure */
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}
Example #10
0
/* Kernel DVB framework calls this when the feed needs to stop. */
static int cx18_dvb_stop_feed(struct dvb_demux_feed *feed)
{
	/* Drop one feed reference; the transport DMA is only stopped
	 * when the last feed goes away. */
	struct cx18_stream *stream = (struct cx18_stream *)feed->demux->priv;
	struct cx18 *cx;
	int ret;

	if (!stream)
		return -EINVAL;

	cx = stream->cx;
	CX18_DEBUG_INFO("Stop feed: pid = 0x%x index = %d\n",
			feed->pid, feed->index);

	mutex_lock(&stream->dvb->feedlock);
	if (--stream->dvb->feeding != 0) {
		ret = 0;
	} else {
		CX18_DEBUG_INFO("Stopping Transport DMA\n");
		mutex_lock(&cx->serialize_lock);
		ret = cx18_stop_v4l2_encode_stream(stream, 0);
		mutex_unlock(&cx->serialize_lock);
	}
	mutex_unlock(&stream->dvb->feedlock);

	return ret;
}
Example #11
0
/*
 * Acquire and register the named ADSP module.
 *
 * On the very first call, sends INIT_INFO and waits (up to 5s) for the
 * ADSP to report its module table; returns -ETIMEDOUT on timeout.
 * Then looks up @name, initializes its RPC endpoint, stores @ops /
 * @driver_data and registers the app with the modem.  Returns 0 on
 * success, -ENODEV for an unknown module, -EBUSY if already claimed,
 * or the RPC error.  On registration failure all module state is
 * rolled back.
 */
int msm_adsp_get(const char *name, struct msm_adsp_module **out,
		 struct msm_adsp_ops *ops, void *driver_data)
{
	struct msm_adsp_module *module;
	int rc = 0;
	/* one-shot guard: INIT_INFO is exchanged only once per boot */
	static uint32_t init_info_cmd_sent;

	mutex_lock(&adsp_info.lock);
	MM_INFO("acquired adsp info lock\n");
	if (!init_info_cmd_sent) {
		init_waitqueue_head(&adsp_info.init_info_wait);
		msm_get_init_info();
		rc = wait_event_timeout(adsp_info.init_info_wait,
			adsp_info.init_info_state == ADSP_STATE_INIT_INFO,
			5 * HZ);
		if (!rc) {
			MM_ERR("adsp: INIT_INFO failed\n");
			mutex_unlock(&adsp_info.lock);
			return -ETIMEDOUT;

		}
		init_info_cmd_sent++;
	}
	mutex_unlock(&adsp_info.lock);

	module = find_adsp_module_by_name(&adsp_info, name);
	if (!module)
		return -ENODEV;

	MM_INFO("adsp: opening module %s\n", module->name);
	mutex_lock(&module->lock);

	if (module->ops) {
		/* already claimed by another client */
		rc = -EBUSY;
		goto done;
	}

	rc = adsp_rpc_init(module);
	if (rc)
		goto done;

	module->ops = ops;
	module->driver_data = driver_data;
	*out = module;
	rc = rpc_adsp_rtos_app_to_modem(RPC_ADSP_RTOS_CMD_REGISTER_APP,
					module->id, module);
	if (rc) {
		/* undo the partial registration */
		module->ops = NULL;
		module->driver_data = NULL;
		*out = NULL;
		MM_ERR("adsp: REGISTER_APP failed\n");
		goto done;
	}

	MM_DBG("adsp: module %s has been registered\n", module->name);

done:
	mutex_unlock(&module->lock);
	return rc;
}
Example #12
0
/* First stage of disconnect processing: stop all commands and remove
 * the host */
static void quiesce_and_remove_host(struct rtsx_dev *dev)
{
	struct Scsi_Host *host = rtsx_to_host(dev);
	struct rtsx_chip *chip = dev->chip;

	/* Prevent new transfers, stop the current command, and
	 * interrupt a SCSI-scan or device-reset delay */
	mutex_lock(&dev->dev_mutex);
	scsi_lock(host);
	rtsx_set_stat(chip, RTSX_STAT_DISCONNECT);
	scsi_unlock(host);
	mutex_unlock(&dev->dev_mutex);
	wake_up(&dev->delay_wait);

	/* let in-flight activity observe the DISCONNECT state */
	wait_timeout(100);

	/* queuecommand won't accept any new commands and the control
	 * thread won't execute a previously-queued command.  If there
	 * is such a command pending, complete it with an error. */
	mutex_lock(&dev->dev_mutex);
	if (chip->srb) {
		chip->srb->result = DID_NO_CONNECT << 16;
		scsi_lock(host);
		chip->srb->scsi_done(dev->chip->srb);
		chip->srb = NULL;
		scsi_unlock(host);
	}
	mutex_unlock(&dev->dev_mutex);

	/* finally detach the SCSI host itself */
	scsi_remove_host(host);
}
Example #13
0
/*
 * Write one struct sstore_blob descriptor from userspace and install a
 * fresh copy of its payload at blob.index.  Returns the payload size
 * written, or a negative errno.
 *
 * Fixes vs. the previous version:
 *  - the payload copy_from_user() result is now checked (-EFAULT on a
 *    partial copy instead of publishing uninitialized kernel memory);
 *  - the blob header is preallocated, so sstore_lock is never dropped
 *    and retaken mid-update (the old unlock/relock windows let racing
 *    writers/readers see a half-updated slot, and freed the old data
 *    while unlocked).
 */
static ssize_t sstore_write(struct file * file,
        const char __user * buf, size_t lbuf, loff_t * ppos)
{
    struct sstore_blob blob;
    struct sstore_blob * blobp;
    char * new_data;
    struct sstore_dev * devp = file->private_data;

    if(lbuf != sizeof(struct sstore_blob)){
        return -EPERM;
    }

    if(copy_from_user(&blob, buf, sizeof(struct sstore_blob)) != 0){
        return -EIO;
    }

    if(blob.index >= num_of_blobs || blob.size > blob_size){
        return -EPERM;
    }

    /* Copy the payload before taking the lock. */
    new_data = kmalloc(blob.size, GFP_KERNEL);
    if(!new_data){
        return -ENOMEM;
    }
    if(copy_from_user(new_data, blob.data, blob.size) != 0){
        kfree(new_data);
        return -EFAULT;
    }

    /* Preallocate the header so the critical section below never has
       to drop the lock for an allocation. */
    blobp = kmalloc(sizeof(struct sstore_blob), GFP_KERNEL);
    if(!blobp){
        kfree(new_data);
        return -ENOMEM;
    }

    mutex_lock(&devp->sstore_lock);
    if(devp->sstore_blobp[blob.index] == NULL){
        devp->sstore_blobp[blob.index] = blobp;
    }
    else{
        /* Slot already populated: free the old payload (still under
           the lock) and the spare header. */
        kfree(devp->sstore_blobp[blob.index]->data);
        kfree(blobp);
    }

    devp->sstore_blobp[blob.index]->size = blob.size;
    devp->sstore_blobp[blob.index]->index = blob.index;
    devp->sstore_blobp[blob.index]->data = new_data;
    devp->write_count++;
    mutex_unlock(&devp->sstore_lock);

    /* Notify blocking readers that data is available. */
    wake_up(&devp->sstore_wq);

    printk(KERN_DEBUG "sstore: Successful write.\n");
    return blob.size;
}
/*
 * open() handler for the sec_nfc misc device.
 *
 * Only the registered NFC uid (g_secnfc_uid) may open the device
 * (-EPERM otherwise); a second open while enabled returns -EBUSY.
 * On the I2C build, the pending-irq state is cleared and the device is
 * moved to SEC_NFC_ST_NORM.
 */
static int sec_nfc_open(struct inode *inode, struct file *file)
{
	struct sec_nfc_info *info = container_of(file->private_data,
						struct sec_nfc_info, miscdev);
	int ret = 0;
	uid_t uid;
// Security
	dev_dbg(info->dev, "%s: info : %p" , __func__, info);

	/* NOTE(review): current_cred()/current_uid() is the usual way to
	 * read the caller's uid; bare __task_cred(current) works for
	 * "current" but confirm against the kernel version's cred rules. */
	uid = __task_cred(current)->uid;

	if (g_secnfc_uid != uid) {
		dev_err(info->dev, "%s: Un-authorized process. No access to device\n", __func__);
		return -EPERM;
	}
// End of Security

	mutex_lock(&info->mutex);
	if (info->state != SEC_NFC_ST_OFF) {
		dev_err(info->dev, "sec_nfc is busy\n");
		ret = -EBUSY;
		goto out;
	}

#ifdef CONFIG_SEC_NFC_I2C
	/* clear any stale interrupt state before powering up */
	mutex_lock(&info->read_mutex);
	info->read_irq = SEC_NFC_NONE;
	mutex_unlock(&info->read_mutex);
	ret = sec_nfc_set_state(info, SEC_NFC_ST_NORM);
#endif
out:
	mutex_unlock(&info->mutex);
	return ret;
}
Example #15
0
File: dir.c Project: cilynx/dd-wrt
/**
 *	sysfs_addrm_start - prepare for sysfs_dirent add/remove
 *	@acxt: pointer to sysfs_addrm_cxt to be used
 *	@parent_sd: parent sysfs_dirent
 *
 *	This function is called when the caller is about to add or
 *	remove sysfs_dirent under @parent_sd.  This function acquires
 *	sysfs_mutex, grabs inode for @parent_sd if available and lock
 *	i_mutex of it.  @acxt is used to keep and pass context to
 *	other addrm functions.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).  sysfs_mutex is locked on
 *	return.  i_mutex of parent inode is locked on return if
 *	available.
 */
void sysfs_addrm_start(struct sysfs_addrm_cxt *acxt,
		       struct sysfs_dirent *parent_sd)
{
	struct inode *inode;

	memset(acxt, 0, sizeof(*acxt));
	acxt->parent_sd = parent_sd;

	/* Lookup parent inode.  inode initialization and I_NEW
	 * clearing are protected by sysfs_mutex.  By grabbing it and
	 * looking up with _nowait variant, inode state can be
	 * determined reliably.
	 */
	mutex_lock(&sysfs_mutex);

	inode = ilookup5_nowait(sysfs_sb, parent_sd->s_ino, sysfs_ilookup_test,
				parent_sd);

	if (inode && !(inode->i_state & I_NEW)) {
		/* parent inode available */
		acxt->parent_inode = inode;

		/* sysfs_mutex is below i_mutex in lock hierarchy.
		 * First, trylock i_mutex.  If fails, unlock
		 * sysfs_mutex and lock them in order.
		 *
		 * NOTE(review): the inode state is not re-validated
		 * after relocking sysfs_mutex here — confirm callers
		 * tolerate the inode changing in that window.
		 */
		if (!mutex_trylock(&inode->i_mutex)) {
			mutex_unlock(&sysfs_mutex);
			mutex_lock(&inode->i_mutex);
			mutex_lock(&sysfs_mutex);
		}
	} else
		/* new or absent inode: drop the lookup reference
		 * (iput(NULL) is a no-op) */
		iput(inode);
}
Example #16
0
 /*ARGSUSED*/
static bool
svc_dg_control(SVCXPRT *xprt, const u_int rq, void *in)
{
	/* Get/set transport flags directly; the free-user-data accessor
	 * pair is serialized by ops_lock.  Unknown requests fail. */
	switch (rq) {
	case SVCGET_XP_FLAGS:
		*(u_int *) in = xprt->xp_flags;
		return (true);
	case SVCSET_XP_FLAGS:
		xprt->xp_flags = *(u_int *) in;
		return (true);
	case SVCGET_XP_FREE_USER_DATA:
		mutex_lock(&ops_lock);
		*(svc_xprt_fun_t *) in = xprt->xp_ops->xp_free_user_data;
		mutex_unlock(&ops_lock);
		return (true);
	case SVCSET_XP_FREE_USER_DATA:
		mutex_lock(&ops_lock);
		xprt->xp_ops->xp_free_user_data = *(svc_xprt_fun_t) in;
		mutex_unlock(&ops_lock);
		return (true);
	default:
		return (false);
	}
}
Example #17
0
/*
 * Apply a new feature mask to the vhost-net device.
 *
 * Chooses header sizes depending on who supplies the vnet header
 * (vhost or the socket) and on mergeable rx buffers, then publishes
 * the features and per-vq header lengths.  Returns -EFAULT if
 * VHOST_F_LOG_ALL is requested but the log memory is not accessible.
 */
static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
	size_t vhost_hlen, sock_hlen, hdr_len;
	int i;

	hdr_len = (features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ?
			sizeof(struct virtio_net_hdr_mrg_rxbuf) :
			sizeof(struct virtio_net_hdr);
	if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
		/* vhost provides vnet_hdr */
		vhost_hlen = hdr_len;
		sock_hlen = 0;
	} else {
		/* socket provides vnet_hdr */
		vhost_hlen = 0;
		sock_hlen = hdr_len;
	}
	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev)) {
		mutex_unlock(&n->dev.mutex);
		return -EFAULT;
	}
	n->dev.acked_features = features;
	/* make the new features visible before the per-vq updates */
	smp_wmb();
	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		mutex_lock(&n->vqs[i].mutex);
		n->vqs[i].vhost_hlen = vhost_hlen;
		n->vqs[i].sock_hlen = sock_hlen;
		mutex_unlock(&n->vqs[i].mutex);
	}
	vhost_net_flush(n);
	mutex_unlock(&n->dev.mutex);
	return 0;
}
Example #18
0
/*
 * Release the baton and wait until this thread may re-acquire it.
 *
 * Marks the baton not-busy and wakes waiters; if some other thread
 * holds a block on it, runs the onBlockedAcquire() callback with the
 * mutex dropped, then waits until the baton is neither busy nor
 * blocked by a different thread before claiming it again.
 */
void Baton::checkBlock()
{
   POSIX_BATON_DATA &p = *(POSIX_BATON_DATA *)m_data;

   mutex_lock( p.m_mtx );
   p.m_bBusy = false;
   cv_broadcast( p.m_cv );

   /* blocked by a thread other than us? */
   bool bb = (p.m_bBlocked && ! pthread_equal( pthread_self(),  p.m_thBlocker ) );

   if ( bb )
   {
      /* callback runs unlocked; state is re-checked in the loop below */
      mutex_unlock( p.m_mtx );
      onBlockedAcquire();
      mutex_lock( p.m_mtx );
   }

   while( p.m_bBusy || (p.m_bBlocked && ! pthread_equal( pthread_self(),  p.m_thBlocker ) ) )
   {
      cv_wait( p.m_cv, p.m_mtx );
   }

   p.m_bBusy = true;
   p.m_bBlocked = false;
   mutex_unlock( p.m_mtx );
}
Example #19
0
/*
 * Claim the G3D hardware for @ctx.
 *
 * Returns 0 if @ctx already owned the hardware, 1 after flushing the
 * pipeline/caches and transferring ownership, or a negative errno.
 * hw_lock stays held on successful return (this is the acquire half
 * of a lock/unlock pair); data->lock only guards the bookkeeping.
 *
 * NOTE(review): the pm_runtime_get_sync() call is commented out, so
 * ret is still 0 at the check below and the error branch is currently
 * dead code — confirm whether runtime PM should be re-enabled.
 */
static int s3c_g3d_lock(struct g3d_context *ctx)
{
	struct g3d_drvdata *data = ctx->data;
	int ret = 0;

	mutex_lock(&data->hw_lock);

	dev_dbg(data->dev, "hardware lock acquired by %p\n", ctx);

	mutex_lock(&data->lock);

	//SEB ret = pm_runtime_get_sync(data->dev);
	if (unlikely(ret < 0)) {
		dev_err(data->dev, "runtime resume failed.\n");
		mutex_unlock(&data->hw_lock);
		goto exit;
	}

	if (likely(data->hw_owner == ctx)) {
		/* fast path: caller already owns the hardware */
		mutex_unlock(&data->lock);
		return 0;
	}

	/* ownership change: flush the previous owner's work */
	g3d_flush_pipeline(data, G3D_FGGB_PIPESTAT_MSK);
	g3d_flush_caches(data);
	g3d_invalidate_caches(data);
	data->hw_owner = ctx;
	ret = 1;

exit:
	mutex_unlock(&data->lock);

	return ret;
}
Example #20
0
/* called by: speakup_init() */
int synth_init(char *synth_name)
{
	int i;
	int ret = 0;
	struct spk_synth *synth = NULL;

	if (synth_name == NULL)
		return 0;

	if (strcmp(synth_name, "none") == 0) {
		mutex_lock(&spk_mutex);
		synth_release();
		mutex_unlock(&spk_mutex);
		return 0;
	}

	mutex_lock(&spk_mutex);
	/* First, check if we already have it loaded. */
	for (i = 0; i < MAXSYNTHS && synths[i] != NULL; i++)
		if (strcmp(synths[i]->name, synth_name) == 0)
			synth = synths[i];

	/* If we got one, initialize it now. */
	if (synth)
		ret = do_synth_init(synth);
	else
		ret = -ENODEV;
	mutex_unlock(&spk_mutex);

	return ret;
}
Example #21
0
static int us5182d_read_event_config(struct iio_dev *indio_dev,
	const struct iio_chan_spec *chan, enum iio_event_type type,
	enum iio_event_direction dir)
{
	struct us5182d_data *data = iio_priv(indio_dev);
	int ret;

	/* Only rising/falling thresholds carry an enable flag. */
	if (dir != IIO_EV_DIR_RISING && dir != IIO_EV_DIR_FALLING)
		return -EINVAL;

	mutex_lock(&data->lock);
	ret = (dir == IIO_EV_DIR_RISING) ? data->rising_en : data->falling_en;
	mutex_unlock(&data->lock);

	return ret;
}
Example #22
0
/**
 * ecryptfs_miscdev_release
 * @inode: inode of fs/ecryptfs/euid handle (ignored)
 * @file: file for fs/ecryptfs/euid handle (ignored)
 *
 * This keeps the daemon registered until the daemon sends another
 * ioctl to fs/ecryptfs/ctl or until the kernel module unregisters.
 *
 * Returns zero on success; non-zero otherwise
 */
static int
ecryptfs_miscdev_release(struct inode *inode, struct file *file)
{
	struct ecryptfs_daemon *daemon = NULL;
	uid_t euid = current_euid();
	int rc;

	/* hash_mux (outer) then daemon->mux (inner): the file must have
	 * been opened by a registered daemon, hence the BUG_ONs. */
	mutex_lock(&ecryptfs_daemon_hash_mux);
	rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
	BUG_ON(rc || !daemon);
	mutex_lock(&daemon->mux);
	BUG_ON(daemon->pid != task_pid(current));
	BUG_ON(!(daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN));
	daemon->flags &= ~ECRYPTFS_DAEMON_MISCDEV_OPEN;
	atomic_dec(&ecryptfs_num_miscdev_opens);
	mutex_unlock(&daemon->mux);
	/* exorcise must run without daemon->mux but with hash_mux held */
	rc = ecryptfs_exorcise_daemon(daemon);
	if (rc) {
		printk(KERN_CRIT "%s: Fatal error whilst attempting to "
		       "shut down daemon; rc = [%d]. Please report this "
		       "bug.\n", __func__, rc);
		BUG();
	}
	module_put(THIS_MODULE);
	mutex_unlock(&ecryptfs_daemon_hash_mux);
	return rc;
}
Example #23
0
/*
 * Close and forget every active ADM copp for every session.
 *
 * Iterates sessions 1..MAX_SESSIONS-1 across all AFE ports; a failed
 * adm_close() is logged and skipped, so the sweep always completes.
 * Lock order (adm_mutex, then copp_list_mutex) matches
 * msm_clear_session_id().
 */
int msm_clear_all_session()
{
	int rc = 0;
	int i = 0, j = 0;
	pr_info("%s:\n", __func__);
	mutex_lock(&routing_info.adm_mutex);
	mutex_lock(&routing_info.copp_list_mutex);
	for (j = 1; j < MAX_SESSIONS; j++) {
		for (i = 0; i < AFE_MAX_PORTS; i++) {
			if (routing_info.copp_list[j][i] != COPP_IGNORE) {
				rc = adm_close(
					routing_info.copp_list[j][i]);
				if (rc < 0) {
					pr_err("%s: adm close fail copp[%d]"
					"session[%d] rc[%d]\n",
					__func__,
					routing_info.copp_list[j][i],
					j, rc);
					continue;
				}
				routing_info.copp_list[j][i] = COPP_IGNORE;
				rc = 0;
			}
		}
	}
	mutex_unlock(&routing_info.copp_list_mutex);
	mutex_unlock(&routing_info.adm_mutex);
	return rc;
}
Example #24
0
/**
 * ecryptfs_send_miscdev
 * @data: Data to send to daemon; may be NULL
 * @data_size: Amount of data to send to daemon
 * @msg_ctx: Message context, which is used to handle the reply. If
 *           this is NULL, then we do not expect a reply.
 * @msg_type: Type of message
 * @msg_flags: Flags for message
 * @daemon: eCryptfs daemon object
 *
 * Add msg_ctx to queue and then, if it exists, notify the blocked
 * miscdevess about the data being available. Must be called with
 * ecryptfs_daemon_hash_mux held.
 *
 * Returns zero on success; non-zero otherwise
 */
int ecryptfs_send_miscdev(char *data, size_t data_size,
			  struct ecryptfs_msg_ctx *msg_ctx, u8 msg_type,
			  u16 msg_flags, struct ecryptfs_daemon *daemon)
{
	int rc = 0;

	/* msg_ctx->mux (outer) protects building the message;
	 * daemon->mux (inner) protects the daemon's out-queue. */
	mutex_lock(&msg_ctx->mux);
	msg_ctx->msg = kmalloc((sizeof(*msg_ctx->msg) + data_size),
			       GFP_KERNEL);
	if (!msg_ctx->msg) {
		rc = -ENOMEM;
		printk(KERN_ERR "%s: Out of memory whilst attempting "
		       "to kmalloc(%zd, GFP_KERNEL)\n", __func__,
		       (sizeof(*msg_ctx->msg) + data_size));
		goto out_unlock;
	}
	msg_ctx->msg->index = msg_ctx->index;
	msg_ctx->msg->data_len = data_size;
	msg_ctx->type = msg_type;
	memcpy(msg_ctx->msg->data, data, data_size);
	msg_ctx->msg_size = (sizeof(*msg_ctx->msg) + data_size);
	mutex_lock(&daemon->mux);
	list_add_tail(&msg_ctx->daemon_out_list, &daemon->msg_ctx_out_queue);
	daemon->num_queued_msg_ctx++;
	/* wake any reader/poller blocked on the daemon's queue */
	wake_up_interruptible(&daemon->wait);
	mutex_unlock(&daemon->mux);
out_unlock:
	mutex_unlock(&msg_ctx->mux);
	return rc;
}
/**
 *	usb_tranzport_disconnect
 *
 *	Called by the usb core when the device is removed from the system.
 */
static void usb_tranzport_disconnect(struct usb_interface *intf)
{
	struct usb_tranzport *dev;
	int minor;
	/* disconnect_mutex serializes against open(); dev->mtx guards
	 * the open_count / intf fields. */
	mutex_lock(&disconnect_mutex);
	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	mutex_lock(&dev->mtx);
	minor = intf->minor;
	/* give back our minor */
	usb_deregister_dev(intf, &usb_tranzport_class);

	/* if the device is not opened, then we clean up right now */
	if (!dev->open_count) {
		mutex_unlock(&dev->mtx);
		usb_tranzport_delete(dev);
	} else {
		/* still open: mark gone, the last release() frees it */
		dev->intf = NULL;
		mutex_unlock(&dev->mtx);
	}

	mutex_unlock(&disconnect_mutex);

	dev_info(&intf->dev, "Tranzport Surface #%d now disconnected\n",
		(minor - USB_TRANZPORT_MINOR_BASE));
}
Example #26
0
/**
 * ecryptfs_miscdev_poll
 * @file: dev file (ignored)
 * @pt: dev poll table (ignored)
 *
 * Returns the poll mask
 */
static unsigned int
ecryptfs_miscdev_poll(struct file *file, poll_table *pt)
{
	struct ecryptfs_daemon *daemon;
	unsigned int mask = 0;
	uid_t euid = current_euid();
	int rc;

	mutex_lock(&ecryptfs_daemon_hash_mux);
	/* TODO: Just use file->private_data? */
	rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
	BUG_ON(rc || !daemon);
	mutex_lock(&daemon->mux);
	/* hash_mux is only needed for the lookup; drop it early */
	mutex_unlock(&ecryptfs_daemon_hash_mux);
	if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
		printk(KERN_WARNING "%s: Attempt to poll on zombified "
		       "daemon\n", __func__);
		goto out_unlock_daemon;
	}
	/* only one read/poll may be in flight per daemon */
	if (daemon->flags & ECRYPTFS_DAEMON_IN_READ)
		goto out_unlock_daemon;
	if (daemon->flags & ECRYPTFS_DAEMON_IN_POLL)
		goto out_unlock_daemon;
	daemon->flags |= ECRYPTFS_DAEMON_IN_POLL;
	/* sleep in poll_wait() with daemon->mux dropped, then re-check */
	mutex_unlock(&daemon->mux);
	poll_wait(file, &daemon->wait, pt);
	mutex_lock(&daemon->mux);
	if (!list_empty(&daemon->msg_ctx_out_queue))
		mask |= POLLIN | POLLRDNORM;
out_unlock_daemon:
	daemon->flags &= ~ECRYPTFS_DAEMON_IN_POLL;
	mutex_unlock(&daemon->mux);
	return mask;
}
Example #27
0
/*
 * open() handler for the voc pcm misc devices.
 *
 * Lazily brings up the shared RPC connection and its kthread on first
 * open (guarded by voc_rpc->lock), then claims the per-minor context
 * (guarded by ctxt->lock) and resets its buffer state.
 *
 * Fix: the two error returns inside the one-time init branch
 * previously returned with voc_rpc->lock still held, deadlocking
 * every subsequent open.
 */
static int vocpcm_open(struct inode *inode, struct file *file)
{
    struct voc_rpc *voc_rpc = &the_voc_proc;
    struct voc_ctxt *ctxt = voc_minor_to_ctxt(MINOR(inode->i_rdev));
    int rc = 0;

    if (!ctxt) {
        pr_err("unknown voc misc %d\n", MINOR(inode->i_rdev));
        return -ENODEV;
    }

    mutex_lock(&voc_rpc->lock);
    if (voc_rpc->inited == 0) {
        voc_rpc->ept = create_rpc_connect(RPC_SND_PROG, RPC_SND_VERS,
                                          MSM_RPC_UNINTERRUPTIBLE |
                                          MSM_RPC_ENABLE_RECEIVE);
        if (IS_ERR(voc_rpc->ept)) {
            rc = PTR_ERR(voc_rpc->ept);
            voc_rpc->ept = NULL;
            pr_err("vocpcm: failed to connect snd svc\n");
            mutex_unlock(&voc_rpc->lock);   /* was missing: mutex leak */
            return rc;
        }

        voc_rpc->task = create_snd_rpc_kthread();
        if (IS_ERR(voc_rpc->task)) {
            rc = PTR_ERR(voc_rpc->task);
            voc_rpc->task = NULL;
            msm_rpc_close(voc_rpc->ept);
            voc_rpc->ept = NULL;
            mutex_unlock(&voc_rpc->lock);   /* was missing: mutex leak */
            return rc;
        }
        voc_rpc->inited = 1;
    }
    mutex_unlock(&voc_rpc->lock);

    mutex_lock(&ctxt->lock);
    if (ctxt->opened) {
        pr_err("vocpcm already opened.\n");
        rc = -EBUSY;
        goto err;
    }

    /* reset per-context buffer/bookkeeping state for a fresh session */
    file->private_data = ctxt;
    ctxt->head = 0;
    ctxt->tail = 0;
    ctxt->client = 0;

    memset(ctxt->buf[0].data, 0, sizeof(ctxt->buf[0].data));
    memset(ctxt->buf[1].data, 0, sizeof(ctxt->buf[1].data));
    ctxt->buf[0].index = 0;
    ctxt->buf[1].index = 0;
    ctxt->opened = 1;
    ctxt->count = 0;
    ctxt->s_ptr = 0;
    ctxt->final_input = 0;

err:
    mutex_unlock(&ctxt->lock);
    return rc;
}
/*
 * Module-param setter toggling Tegra auto-hotplug between the idle and
 * disabled states.
 *
 * The lock is dropped around cancel_delayed_work_sync() because the
 * hotplug work itself takes tegra3_cpu_lock — keeping it held across
 * the sync cancel would deadlock.
 */
static int hp_state_set(const char *arg, const struct kernel_param *kp)
{
	int ret = 0;
	int old_state;

	if (!tegra3_cpu_lock)
		return ret;

	mutex_lock(tegra3_cpu_lock);

	old_state = hp_state;
	ret = param_set_bool(arg, kp);	/* set idle or disabled only */

	if (ret == 0) {
		if ((hp_state == TEGRA_HP_DISABLED) &&
		    (old_state != TEGRA_HP_DISABLED)) {
			/* see the function comment: unlock for the sync cancel */
			mutex_unlock(tegra3_cpu_lock);
			cancel_delayed_work_sync(&hotplug_work);
			mutex_lock(tegra3_cpu_lock);
			pr_info("Tegra auto-hotplug disabled\n");
		} else if (hp_state != TEGRA_HP_DISABLED) {
			if (old_state == TEGRA_HP_DISABLED) {
				pr_info("Tegra auto-hotplug enabled\n");
				hp_init_stats();
			}
			/* catch-up with governor target speed */
			tegra_cpu_set_speed_cap(NULL);
		}
	} else
		pr_warn("%s: unable to set tegra hotplug state %s\n",
				__func__, arg);

	mutex_unlock(tegra3_cpu_lock);
	return ret;
}
Example #29
0
/*
 * close() handler for the v4l2-flash subdev.
 *
 * On the last file handle: switch the strobe source back to software
 * and re-enable the LED sysfs interface for both the flash and the
 * indicator class devices.  led_access serializes against sysfs users.
 */
static int v4l2_flash_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	struct v4l2_flash *v4l2_flash = v4l2_subdev_to_v4l2_flash(sd);
	struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
	struct led_classdev *led_cdev = fled_cdev ? &fled_cdev->led_cdev : NULL;
	struct led_classdev *led_cdev_ind = v4l2_flash->iled_cdev;
	int ret = 0;

	/* nothing to do until the last handle goes away */
	if (!v4l2_fh_is_singular(&fh->vfh))
		return 0;

	if (led_cdev) {
		mutex_lock(&led_cdev->led_access);

		if (v4l2_flash->ctrls[STROBE_SOURCE])
			ret = v4l2_ctrl_s_ctrl(
				v4l2_flash->ctrls[STROBE_SOURCE],
				V4L2_FLASH_STROBE_SOURCE_SOFTWARE);
		led_sysfs_enable(led_cdev);

		mutex_unlock(&led_cdev->led_access);
	}

	if (led_cdev_ind) {
		mutex_lock(&led_cdev_ind->led_access);
		led_sysfs_enable(led_cdev_ind);
		mutex_unlock(&led_cdev_ind->led_access);
	}

	return ret;
}
Example #30
0
/*
 * Program the interrupt persist count (number of consecutive
 * out-of-threshold samples needed to raise an event) for the ALS or
 * proximity channel, from a period given as val seconds + val2 us.
 *
 * NOTE(review): the two switch arms are duplicates differing only in
 * the sample-period reader, lock, regmap field and cached period;
 * a common helper would remove the duplication once the field types
 * are confirmed.  The trailing return -EINVAL is unreachable — every
 * switch case returns.
 */
static int ltr501_write_intr_prst(struct ltr501_data *data,
				  enum iio_chan_type type,
				  int val, int val2)
{
	int ret, samp_period, new_val;
	unsigned long period;

	if (val < 0 || val2 < 0)
		return -EINVAL;

	/* period in microseconds */
	period = ((val * 1000000) + val2);

	switch (type) {
	case IIO_INTENSITY:
		ret = ltr501_als_read_samp_period(data, &samp_period);
		if (ret < 0)
			return ret;

		/* period should be atleast equal to sampling period */
		if (period < samp_period)
			return -EINVAL;

		/* hardware field is a 4-bit count of sample periods */
		new_val = DIV_ROUND_UP(period, samp_period);
		if (new_val < 0 || new_val > 0x0f)
			return -EINVAL;

		mutex_lock(&data->lock_als);
		ret = regmap_field_write(data->reg_als_prst, new_val);
		mutex_unlock(&data->lock_als);
		if (ret >= 0)
			data->als_period = period;

		return ret;
	case IIO_PROXIMITY:
		ret = ltr501_ps_read_samp_period(data, &samp_period);
		if (ret < 0)
			return ret;

		/* period should be atleast equal to rate */
		if (period < samp_period)
			return -EINVAL;

		new_val = DIV_ROUND_UP(period, samp_period);
		if (new_val < 0 || new_val > 0x0f)
			return -EINVAL;

		mutex_lock(&data->lock_ps);
		ret = regmap_field_write(data->reg_ps_prst, new_val);
		mutex_unlock(&data->lock_ps);
		if (ret >= 0)
			data->ps_period = period;

		return ret;
	default:
		return -EINVAL;
	}

	return -EINVAL;
}