static ssize_t isert_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *f_pos)
{
	struct isert_conn_dev *dev = filp->private_data;
	size_t to_read;

	if (dev->state == CS_DISCONNECTED)
		return -EPIPE;

	if (will_read_block(dev)) {
		int ret;

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		ret = wait_event_freezable(dev->waitqueue,
					   !will_read_block(dev));
		if (ret < 0)
			return ret;
	}

	to_read = min(count, dev->read_len);
	if (copy_to_user(buf, dev->read_buf, to_read))
		return -EFAULT;

	dev->read_len -= to_read;
	dev->read_buf += to_read;

	switch (dev->state) {
	case CS_REQ_BHS:
		if (dev->read_len == 0) {
			dev->read_len = dev->login_req->bufflen;
			dev->sg_virt = isert_vmap_sg(dev->pages,
						     dev->login_req->sg,
						     dev->login_req->sg_cnt);
			if (!dev->sg_virt)
				return -ENOMEM;
			dev->read_buf = dev->sg_virt + ISER_HDRS_SZ;
			dev->state = CS_REQ_DATA;
		}
		break;

	case CS_REQ_DATA:
		if (dev->read_len == 0) {
			vunmap(dev->sg_virt);
			dev->sg_virt = NULL;

			spin_lock(&dev->pdu_lock);
			dev->login_req = NULL;
			dev->state = CS_REQ_FINISHED;
			spin_unlock(&dev->pdu_lock);
		}
		break;

	default:
		PRINT_ERROR("Invalid state in %s (%d)\n", __func__, dev->state);
		to_read = 0;
	}

	return to_read;
}
/* afs_osi_SleepSig
 *
 * Waits for an event to be notified, returning early if a signal
 * is received.  Returns EINTR if signaled, and 0 otherwise.
 */
int afs_osi_SleepSig(void *event)
{
	struct afs_event *evp;
	int seq, retval;
	int code;

	evp = afs_getevent(event);
	if (!evp) {
		afs_addevent(event);
		evp = afs_getevent(event);
	}

	seq = evp->seq;
	retval = 0;

	AFS_GUNLOCK();
	code = wait_event_freezable(evp->cond, seq != evp->seq);
	AFS_GLOCK();

	if (code == -ERESTARTSYS)
		code = EINTR;
	else
		code = -code;

	relevent(evp);
	return code;
}
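/*
 * A minimal, hypothetical caller sketch (not from the source above): it only
 * illustrates the documented contract that afs_osi_SleepSig() returns 0 when
 * the event is notified and EINTR when interrupted by a signal.  The
 * event_handle argument and the some_condition() predicate are assumptions
 * made up for illustration.
 */
static int wait_for_condition_sig(void *event_handle)
{
	int code = 0;

	while (!some_condition()) {
		code = afs_osi_SleepSig(event_handle);
		if (code == EINTR)
			break;	/* caller decides how to handle the signal */
	}
	return code;
}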
static ssize_t rcu_read(struct file *file, char __user *buf, size_t len,
			loff_t *pos)
{
	int ret = 0;

	flag = 0;
	printk("%s: %d\n", __func__, __LINE__);
	while (flag != 1) {
		ret = wait_event_freezable(waitq, flag == 1);
		if (ret == -ERESTARTSYS)
			return ret;	/* interrupted by a signal; don't busy-loop */
		printk_ratelimited("ret = %d\n", ret);
	}
	printk("%s: %d\n", __func__, __LINE__);
	return 1;
}
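/*
 * Hypothetical producer side for the rcu_read() example above (not part of
 * the original source): whoever satisfies the condition must set the shared
 * flag before waking the queue, otherwise wait_event_freezable() re-checks a
 * false condition and goes back to sleep.  The flag/waitq globals are assumed
 * to be declared elsewhere, e.g. "static int flag;" and
 * "static DECLARE_WAIT_QUEUE_HEAD(waitq);".
 */
static void rcu_read_wake(void)
{
	flag = 1;		/* make the wait condition true first */
	wake_up(&waitq);	/* then wake the reader sleeping in rcu_read() */
}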
static ssize_t isert_listen_read(struct file *filp, char __user *buf,
				 size_t count, loff_t *f_pos)
{
	struct isert_listener_dev *dev = filp->private_data;
	struct isert_conn_dev *conn_dev;
	int res = 0;
	char k_buff[sizeof("/dev/") + sizeof(ISER_CONN_DEV_PREFIX) + 3 + 1];
	size_t to_write;

	TRACE_ENTRY();

	if (!have_new_connection(dev)) {
wait_for_connection:
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		res = wait_event_freezable(dev->waitqueue,
					   have_new_connection(dev));
		if (res < 0)
			goto out;
	}

	spin_lock(&dev->conn_lock);
	if (list_empty(&dev->new_conn_list)) {
		/* could happen if we got disconnect */
		spin_unlock(&dev->conn_lock);
		goto wait_for_connection;
	}
	conn_dev = list_first_entry(&dev->new_conn_list, struct isert_conn_dev,
				    conn_list_entry);
	list_move(&conn_dev->conn_list_entry, &dev->curr_conn_list);
	spin_unlock(&dev->conn_lock);

	to_write = min_t(size_t, sizeof(k_buff), count);
	res = scnprintf(k_buff, to_write, "/dev/"ISER_CONN_DEV_PREFIX"%d",
			conn_dev->idx);
	++res; /* copy trailing \0 as well */

	if (unlikely(copy_to_user(buf, k_buff, res)))
		res = -EFAULT;

out:
	TRACE_EXIT_RES(res);
	return res;
}
static int oom_reaper(void *unused)
{
	while (true) {
		struct task_struct *tsk = NULL;

		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
		spin_lock(&oom_reaper_lock);
		if (oom_reaper_list != NULL) {
			tsk = oom_reaper_list;
			oom_reaper_list = tsk->oom_reaper_list;
		}
		spin_unlock(&oom_reaper_lock);

		if (tsk)
			oom_reap_task(tsk);
	}

	return 0;
}
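/*
 * For context, a sketch of the producer side that pairs with the reaper loop
 * above: mm/oom_kill.c of this era pushes the victim onto oom_reaper_list
 * under oom_reaper_lock and then wakes oom_reaper_wait.  This is an
 * approximation; exact checks differ between kernel versions.
 */
static void wake_oom_reaper(struct task_struct *tsk)
{
	/* already queued for reaping */
	if (tsk->oom_reaper_list)
		return;

	get_task_struct(tsk);

	spin_lock(&oom_reaper_lock);
	tsk->oom_reaper_list = oom_reaper_list;
	oom_reaper_list = tsk;
	spin_unlock(&oom_reaper_lock);
	wake_up(&oom_reaper_wait);
}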
static void wait_for_dump_helpers(struct file *file)
{
	struct pipe_inode_info *pipe;

	pipe = file_inode(file)->i_pipe;

	pipe_lock(pipe);
	pipe->readers++;
	pipe->writers--;
	wake_up_interruptible_sync(&pipe->wait);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	pipe_unlock(pipe);

	wait_event_freezable(pipe->wait, pipe->readers == 1);

	pipe_lock(pipe);
	pipe->readers--;
	pipe->writers++;
	pipe_unlock(pipe);
}
/**
 * Slab mover thread.
 * Sits waiting for a condition to jump off and shovel some memory about.
 */
static int mc_slab_rebalance(void *ignore)
{
	int was_busy = 0;

	set_freezable();
	while (1) {
		wait_event_freezable(slab_rebal.wq,
				     slab_rebal.signal ||
				     kthread_should_stop());
		if (kthread_should_stop())
			break;

		mutex_lock(&slab_rebal.lock);
		if (slab_rebal.signal == 1) {
			if (mc_slab_rebalance_start() < 0) {
				/* Handle errors with more specificity as required. */
				slab_rebal.signal = 0;
			}
			was_busy = 0;
		} else if (slab_rebal.signal && slab_rebal.slab_start) {
			was_busy = mc_slab_rebalance_move();
		}

		if (slab_rebal.done) {
			mc_slab_rebalance_finish();
		} else if (was_busy) {
			/*
			 * Stuck waiting for some items to unlock, so slow down
			 * a bit to give them a chance to free up.
			 */
			msleep(1);
		}
		mutex_unlock(&slab_rebal.lock);
	}

	return 0;
}
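/*
 * Hypothetical trigger for the slab mover above (not in the original source):
 * a rebalance is requested by setting slab_rebal.signal under slab_rebal.lock
 * and waking slab_rebal.wq, which is exactly what the wait_event_freezable()
 * condition in mc_slab_rebalance() watches for.  The helper name is made up
 * for illustration.
 */
static void mc_slab_rebalance_kick(void)
{
	mutex_lock(&slab_rebal.lock);
	slab_rebal.signal = 1;	/* ask the mover to pick a new source slab */
	mutex_unlock(&slab_rebal.lock);
	wake_up(&slab_rebal.wq);
}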
static int msm_lsm_ioctl_shared(struct snd_pcm_substream *substream,
				unsigned int cmd, void *arg)
{
	unsigned long flags;
	int ret;
	struct snd_lsm_sound_model snd_model;
	struct snd_lsm_sound_model_v2 snd_model_v2;
	struct snd_lsm_session_data session_data;
	int rc = 0;
	int xchg = 0;
	u32 size = 0;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct lsm_priv *prtd = runtime->private_data;
	struct snd_lsm_event_status *user = arg;
	struct snd_lsm_detection_params det_params;
	uint8_t *confidence_level = NULL;

	pr_debug("%s: enter cmd %x\n", __func__, cmd);
	switch (cmd) {
	case SNDRV_LSM_SET_SESSION_DATA:
		pr_debug("%s: set Session data\n", __func__);
		memcpy(&session_data, arg, sizeof(struct snd_lsm_session_data));
		if (prtd) {
			if (session_data.app_id <= LSM_VOICE_WAKEUP_APP_ID_V2 &&
			    session_data.app_id > 0) {
				prtd->lsm_client->app_id = session_data.app_id;
				ret = q6lsm_open(prtd->lsm_client,
						 prtd->lsm_client->app_id);
				if (ret < 0) {
					pr_err("%s: lsm open failed, %d\n",
					       __func__, ret);
					q6lsm_client_free(prtd->lsm_client);
					kfree(prtd);
					return ret;
				}
				pr_debug("%s: Session ID %d\n", __func__,
					 prtd->lsm_client->session);
			} else {
				pr_err("%s:Invalid App id for Listen client\n",
				       __func__);
				rc = -EINVAL;
			}
		} else {
			pr_err("%s: LSM Priv data is NULL\n", __func__);
			rc = -EINVAL;
		}
		break;

	case SNDRV_LSM_REG_SND_MODEL_V2:
		pr_debug("%s: Registering sound model V2\n", __func__);
		memcpy(&snd_model_v2, arg,
		       sizeof(struct snd_lsm_sound_model_v2));
		if (snd_model_v2.num_confidence_levels > MAX_NUM_CONFIDENCE) {
			pr_err("%s: Invalid conf_levels = %d, maximum allowed = %d\n",
			       __func__, snd_model_v2.num_confidence_levels,
			       MAX_NUM_CONFIDENCE);
			rc = -EINVAL;
			break;
		}
		prtd->lsm_client->snd_model_ver_inuse = SND_MODEL_IN_USE_V2;
		rc = q6lsm_snd_model_buf_alloc(prtd->lsm_client,
					       snd_model_v2.data_size);
		if (rc) {
			pr_err("%s: q6lsm buffer alloc failed V2, size %d\n",
			       __func__, snd_model_v2.data_size);
			break;
		}
		if (copy_from_user(prtd->lsm_client->sound_model.data,
				   snd_model_v2.data,
				   snd_model_v2.data_size)) {
			pr_err("%s: copy from user data failed\n"
			       "data %p size %d\n", __func__,
			       snd_model_v2.data, snd_model_v2.data_size);
			q6lsm_snd_model_buf_free(prtd->lsm_client);
			rc = -EFAULT;
			break;
		}
		pr_debug("SND Model Magic no byte[0] %x,\n"
			 "byte[1] %x, byte[2] %x byte[3] %x\n",
			 snd_model_v2.data[0], snd_model_v2.data[1],
			 snd_model_v2.data[2], snd_model_v2.data[3]);
		prtd->lsm_client->num_confidence_levels =
			snd_model_v2.num_confidence_levels;

		rc = msm_lsm_get_conf_levels(prtd->lsm_client,
					     snd_model_v2.confidence_level);
		if (rc) {
			pr_err("%s: get_conf_levels failed, err = %d\n",
			       __func__, rc);
			break;
		}

		rc = q6lsm_register_sound_model(prtd->lsm_client,
						snd_model_v2.detection_mode,
						snd_model_v2.detect_failure);
		if (rc < 0) {
			pr_err("%s: Register snd Model v2 failed =%d\n",
			       __func__, rc);
			kfree(confidence_level);
			q6lsm_snd_model_buf_free(prtd->lsm_client);
		}

		kfree(prtd->lsm_client->confidence_levels);
		prtd->lsm_client->confidence_levels = NULL;
		break;

	case SNDRV_LSM_SET_PARAMS:
		if (!arg) {
			pr_err("%s: %s Invalid argument\n",
			       __func__, "SNDRV_LSM_SET_PARAMS");
			return -EINVAL;
		}

		memcpy(&det_params, arg, sizeof(det_params));
		if (det_params.num_confidence_levels > MAX_NUM_CONFIDENCE) {
			rc = -EINVAL;
			break;
		}

		prtd->lsm_client->num_confidence_levels =
			det_params.num_confidence_levels;

		rc = msm_lsm_get_conf_levels(prtd->lsm_client,
					     det_params.conf_level);
		if (rc) {
			pr_err("%s: Failed to get conf_levels, err = %d\n",
			       __func__, rc);
			break;
		}

		rc = q6lsm_set_data(prtd->lsm_client,
				    det_params.detect_mode,
				    det_params.detect_failure);
		if (rc)
			pr_err("%s: Failed to set params, err = %d\n",
			       __func__, rc);

		kfree(prtd->lsm_client->confidence_levels);
		prtd->lsm_client->confidence_levels = NULL;
		break;

	case SNDRV_LSM_REG_SND_MODEL:
		pr_debug("%s: Registering sound model\n", __func__);
		memcpy(&snd_model, arg, sizeof(struct snd_lsm_sound_model));
		prtd->lsm_client->snd_model_ver_inuse = SND_MODEL_IN_USE_V1;
		rc = q6lsm_snd_model_buf_alloc(prtd->lsm_client,
					       snd_model.data_size);
		if (rc) {
			pr_err("%s: q6lsm buffer alloc failed, size %d\n",
			       __func__, snd_model.data_size);
			break;
		}
		if (copy_from_user(prtd->lsm_client->sound_model.data,
				   snd_model.data, snd_model.data_size)) {
			pr_err("%s: copy from user data failed data %p size %d\n",
			       __func__, snd_model.data, snd_model.data_size);
			rc = -EFAULT;
			q6lsm_snd_model_buf_free(prtd->lsm_client);
			break;
		}
		rc = q6lsm_set_kw_sensitivity_level(prtd->lsm_client,
						    snd_model.min_keyw_confidence,
						    snd_model.min_user_confidence);
		if (rc) {
			pr_err("%s: Error in KW sensitivity %x", __func__, rc);
			q6lsm_snd_model_buf_free(prtd->lsm_client);
			break;
		}
		rc = q6lsm_register_sound_model(prtd->lsm_client,
						snd_model.detection_mode,
						snd_model.detect_failure);
		if (rc < 0) {
			pr_err("%s: q6lsm_register_sound_model failed =%d\n",
			       __func__, rc);
			q6lsm_snd_model_buf_free(prtd->lsm_client);
		}
		break;

	case SNDRV_LSM_DEREG_SND_MODEL:
		pr_debug("%s: Deregistering sound model\n", __func__);
		rc = q6lsm_deregister_sound_model(prtd->lsm_client);
		break;

	case SNDRV_LSM_EVENT_STATUS:
		pr_debug("%s: Get event status\n", __func__);
		atomic_set(&prtd->event_wait_stop, 0);
		rc = wait_event_freezable(prtd->event_wait,
				(cmpxchg(&prtd->event_avail, 1, 0) ||
				 (xchg = atomic_cmpxchg(&prtd->event_wait_stop,
							1, 0))));
		pr_debug("%s: wait_event_freezable %d event_wait_stop %d\n",
			 __func__, rc, xchg);
		if (!rc && !xchg) {
			pr_debug("%s: New event available %ld\n", __func__,
				 prtd->event_avail);
			spin_lock_irqsave(&prtd->event_lock, flags);
			if (prtd->event_status) {
				size = sizeof(*(prtd->event_status)) +
				       prtd->event_status->payload_size;
				spin_unlock_irqrestore(&prtd->event_lock,
						       flags);
			} else {
				spin_unlock_irqrestore(&prtd->event_lock,
						       flags);
				rc = -EINVAL;
				pr_err("%s: prtd->event_status is NULL\n",
				       __func__);
				break;
			}
			if (user->payload_size <
			    prtd->event_status->payload_size) {
				pr_debug("%s: provided %dbytes isn't enough, needs %dbytes\n",
					 __func__, user->payload_size,
					 prtd->event_status->payload_size);
				rc = -ENOMEM;
			} else {
				memcpy(user, prtd->event_status, size);
				if (prtd->lsm_client->lab_enable &&
				    atomic_read(&prtd->read_abort) &&
				    prtd->event_status->status ==
					LSM_VOICE_WAKEUP_STATUS_DETECTED) {
					atomic_set(&prtd->read_abort, 0);
					atomic_set(&prtd->buf_count, 0);
					prtd->appl_cnt = 0;
					prtd->dma_write = 0;
					rc = msm_lsm_queue_lab_buffer(prtd, 0);
					if (rc)
						pr_err("%s: Queue buffer failed for lab rc = %d\n",
						       __func__, rc);
					else
						prtd->lsm_client->lab_started = true;
				}
			}
		} else if (xchg) {
			pr_debug("%s: Wait aborted\n", __func__);
			rc = 0;
		}
		break;

	case SNDRV_LSM_ABORT_EVENT:
		pr_debug("%s: Aborting event status wait\n", __func__);
		atomic_set(&prtd->event_wait_stop, 1);
		wake_up(&prtd->event_wait);
		break;

	case SNDRV_LSM_START:
		pr_debug("%s: Starting LSM client session\n", __func__);
		if (!prtd->lsm_client->started) {
			if (prtd->lsm_client->lab_enable &&
			    !prtd->lsm_client->lab_started) {
				atomic_set(&prtd->read_abort, 0);
				/* Push the first period buffer */
				ret = msm_lsm_queue_lab_buffer(prtd, 0);
				if (ret) {
					pr_err("%s: failed to queue buffers for LAB read %d\n",
					       __func__, ret);
					break;
				}
				prtd->lsm_client->lab_started = true;
			}
			ret = q6lsm_start(prtd->lsm_client, true);
			if (!ret) {
				prtd->lsm_client->started = true;
				pr_debug("%s: LSM client session started\n",
					 __func__);
			}
		}
		break;

	case SNDRV_LSM_STOP: {
		pr_debug("%s: Stopping LSM client session\n", __func__);
		if (prtd->lsm_client->started) {
			if (prtd->lsm_client->lab_enable) {
				atomic_set(&prtd->read_abort, 1);
				if (prtd->lsm_client->lab_started) {
					ret = q6lsm_stop_lab(prtd->lsm_client);
					if (ret)
						pr_err("%s: stop lab failed ret %d\n",
						       __func__, ret);
					prtd->lsm_client->lab_started = false;
				}
				ret = msm_lsm_lab_buffer_alloc(prtd,
							LAB_BUFFER_DEALLOC);
				if (ret)
					pr_err("%s: lab buffer de-alloc failed rc %d",
					       __func__, ret);
			}
			ret = q6lsm_stop(prtd->lsm_client, true);
			if (!ret)
				pr_debug("%s: LSM client session stopped %d\n",
					 __func__, ret);
			prtd->lsm_client->started = false;
		}
		break;
	}

	case SNDRV_LSM_LAB_CONTROL: {
		u32 *enable = NULL;

		pr_debug("%s: ioctl %s\n", __func__, "SNDRV_LSM_LAB_CONTROL");
		if (!arg) {
			pr_err("%s: Invalid param arg for ioctl %s session %d\n",
			       __func__, "SNDRV_LSM_LAB_CONTROL",
			       prtd->lsm_client->session);
			rc = -EINVAL;
			break;
		}
		enable = (int *)arg;
		if (!prtd->lsm_client->started) {
			if (prtd->lsm_client->lab_enable == *enable) {
				pr_info("%s: Lab for session %d already %s\n",
					__func__, prtd->lsm_client->session,
					((*enable) ? "enabled" : "disabled"));
				rc = 0;
				break;
			}
			rc = q6lsm_lab_control(prtd->lsm_client, *enable);
			if (rc)
				pr_err("%s: ioctl %s failed rc %d to %s lab for session %d\n",
				       __func__, "SNDRV_LAB_CONTROL", rc,
				       ((*enable) ? "enable" : "disable"),
				       prtd->lsm_client->session);
			else {
				rc = msm_lsm_lab_buffer_alloc(prtd,
					((*enable) ? LAB_BUFFER_ALLOC
						   : LAB_BUFFER_DEALLOC));
				if (rc)
					pr_err("%s: msm_lsm_lab_buffer_alloc failed rc %d for %s",
					       __func__, rc,
					       ((*enable) ? "ALLOC" : "DEALLOC"));
				if (!rc)
					prtd->lsm_client->lab_enable = *enable;
			}
		} else {
			pr_err("%s: ioctl %s issued after start", __func__,
			       "SNDRV_LSM_LAB_CONTROL");
			rc = -EINVAL;
		}
		break;
	}

	case SNDRV_LSM_STOP_LAB:
		if (prtd->lsm_client->lab_enable &&
		    prtd->lsm_client->lab_started) {
			atomic_set(&prtd->read_abort, 1);
			rc = q6lsm_stop_lab(prtd->lsm_client);
			if (rc)
				pr_err("%s: Lab stop failed for session %d rc %d\n",
				       __func__, prtd->lsm_client->session, rc);
			prtd->lsm_client->lab_started = false;
		}
		break;

	default:
		pr_debug("%s: Falling into default snd_lib_ioctl cmd 0x%x\n",
			 __func__, cmd);
		rc = snd_pcm_lib_ioctl(substream, cmd, arg);
		break;
	}

	if (!rc)
		pr_debug("%s: leave (%d)\n", __func__, rc);
	else
		pr_err("%s: cmd 0x%x failed %d\n", __func__, cmd, rc);

	return rc;
}
static int mc_hash_thread(void *ignore)
{
	set_freezable();

	mc_slabs_rebalancer_pause();

	while (!test_bit(ZOMBIE, &hashflags)) {
		int ii = 0;

		/*
		 * Lock the cache, and bulk move multiple buckets to
		 * the new hash table.
		 */
		mc_item_lock_global();
		mutex_lock(&cache_lock);

		for (ii = 0; ii < settings.hash_bulk_move &&
			     test_bit(EXPANDING, &hashflags); ii++) {
			item *it, *next;
			int bucket;

			for (it = old_hashtable[expand_bucket]; it; it = next) {
				next = it->h_next;
				bucket = hash(ITEM_key(it), it->nkey, 0) &
					 hashmask(hashpower);
				it->h_next = primary_hashtable[bucket];
				primary_hashtable[bucket] = it;
			}

			old_hashtable[expand_bucket] = NULL;
			expand_bucket++;

			if (expand_bucket == hashsize(hashpower - 1)) {
				clear_bit(EXPANDING, &hashflags);
				clear_bit(SEXPANDING, &hashflags);
				free_buffer(&old_hts);
				ATOMIC64_SUB(stats.hash_bytes,
					     hashsize(hashpower - 1) *
					     sizeof(void *));
				clear_bit(STATS_HASH_EXP, &stats.flags);
				PVERBOSE(1, "hash table expansion done\n");
			}
		}

		mutex_unlock(&cache_lock);
		mc_item_unlock_global();

		if (!test_bit(EXPANDING, &hashflags)) {
			/*
			 * finished expanding. tell all threads to use
			 * fine-grained locks.
			 */
			mc_switch_item_lock_type(ITEM_LOCK_GRANULAR);
			mc_slabs_rebalancer_resume();

			/*
			 * We are done expanding.. just wait for next invocation
			 */
			wait_event_freezable(hash_wait_queue,
					     test_bit(SEXPANDING, &hashflags) ||
					     kthread_should_stop());
			if (test_bit(ZOMBIE, &hashflags))
				goto out;

			/* before doing anything, tell threads to use a global lock */
			mc_slabs_rebalancer_pause();
			mc_switch_item_lock_type(ITEM_LOCK_GLOBAL);

			mutex_lock(&cache_lock);
			mc_hash_expand();
			mutex_unlock(&cache_lock);
		}
	}

out:
	return 0;
}
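/*
 * Hypothetical trigger for the hash expansion thread above (not part of the
 * original source): requesting a grow amounts to setting the SEXPANDING bit
 * that the wait_event_freezable() condition in mc_hash_thread() checks, and
 * then waking hash_wait_queue.  The helper name is made up for illustration.
 */
static void mc_hash_request_expand(void)
{
	set_bit(SEXPANDING, &hashflags);
	wake_up(&hash_wait_queue);
}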