/*
 * gs_close - TTY close() callback for a gadget serial port.
 *
 * Drops the last open reference on @port: notifies the gadget side via
 * gser->disconnect, waits (up to GS_CLOSE_TIMEOUT seconds) for the circular
 * write buffer to drain, then frees or scrubs the buffer and releases the
 * queued USB requests so gs_open() can reallocate them.
 *
 * Called with no locks held; takes and releases port->port_lock, dropping
 * it only around the drain wait and the FIFO flushes.
 */
static void gs_close(struct tty_struct *tty, struct file *file)
{
	struct gs_port *port = tty->driver_data;
	struct gserial *gser;

	spin_lock_irq(&port->port_lock);

	/* Not the last close: just decrement and leave (warn on underflow). */
	if (port->open_count != 1) {
		if (port->open_count == 0)
			WARN_ON(1);
		else
			--port->open_count;
		goto exit;
	}

	pr_debug("gs_close: ttyGS%d (%p,%p) ...\n", port->port_num, tty, file);

	/* mark port as closing but in use; we can drop port lock
	 * and sleep if necessary
	 */
	port->openclose = true;
	port->open_count = 0;

	gser = port->port_usb;
	if (gser && gser->disconnect)
		gser->disconnect(gser);

	/* wait for circular write buffer to drain, disconnect, or at
	 * most GS_CLOSE_TIMEOUT seconds; then discard the rest
	 */
	if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) {
		spin_unlock_irq(&port->port_lock);
		wait_event_interruptible_timeout(port->drain_wait,
				gs_writes_finished(port),
				GS_CLOSE_TIMEOUT * HZ);
		spin_lock_irq(&port->port_lock);
		/* re-read: a disconnect may have happened while unlocked */
		gser = port->port_usb;
	}

	/* Iff we're disconnected, there can be no I/O in flight so it's
	 * ok to free the circular buffer; else just scrub it. And don't
	 * let the push work queue fire again until we're re-opened.
	 */
	if (gser == NULL)
		gs_buf_free(&port->port_write_buf);
	else
		gs_buf_clear(&port->port_write_buf);

	tty->driver_data = NULL;
	port->port_tty = NULL;

	port->openclose = false;

	pr_debug("gs_close: ttyGS%d (%p,%p) done!\n",
			port->port_num, tty, file);

	wake_up_interruptible(&port->close_wait);

	/*
	 * Freeing the previously queued requests as they are
	 * allocated again as a part of gs_open()
	 *
	 * NOTE(review): this tests port->port_usb but then dereferences the
	 * locally cached 'gser', which may be stale if no drain wait was
	 * taken (or may differ after the FIFO-flush unlock window) —
	 * confirm the two cannot diverge here.
	 */
	if (port->port_usb) {
		spin_unlock_irq(&port->port_lock);
		usb_ep_fifo_flush(gser->out);
		usb_ep_fifo_flush(gser->in);
		spin_lock_irq(&port->port_lock);
		gs_free_requests(gser->out, &port->read_queue, NULL);
		gs_free_requests(gser->out, &port->read_pool, NULL);
		gs_free_requests(gser->in, &port->write_pool, NULL);
	}
	port->read_allocated = port->read_started =
		port->write_allocated = port->write_started = 0;
exit:
	spin_unlock_irq(&port->port_lock);
}
/* ------------------- device --------------------- */

/*
 * auda2dp_in_ioctl - ioctl handler for the A2DP-in audio device.
 *
 * AUDIO_GET_STATS is served lock-free (atomic counters); every other
 * command runs under audio->lock. Returns 0 on success or a negative
 * errno (-EFAULT on copy failures, -EINVAL on bad arguments, -ENODEV
 * when the encoder fails to come up).
 */
static long auda2dp_in_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct audio_a2dp_in *audio = file->private_data;
	int rc = 0;

	/* Stats are read without taking audio->lock. */
	if (cmd == AUDIO_GET_STATS) {
		struct msm_audio_stats stats;
		stats.byte_count = atomic_read(&audio->in_bytes);
		stats.sample_count = atomic_read(&audio->in_samples);
		if (copy_to_user((void *) arg, &stats, sizeof(stats)))
			return -EFAULT;
		return rc;
	}

	mutex_lock(&audio->lock);
	switch (cmd) {
	case AUDIO_START: {
		uint32_t freq;
		/* Poll at 48KHz always */
		freq = 48000;
		MM_DBG("AUDIO_START\n");
		rc = msm_snddev_request_freq(&freq, audio->enc_id,
				SNDDEV_CAP_TX, AUDDEV_CLNT_ENC);
		MM_DBG("sample rate configured %d sample rate requested %d\n",
				freq, audio->samp_rate);
		if (rc < 0) {
			MM_DBG("sample rate can not be set, return code %d\n", rc);
			msm_snddev_withdraw_freq(audio->enc_id,
					SNDDEV_CAP_TX, AUDDEV_CLNT_ENC);
			MM_DBG("msm_snddev_withdraw_freq\n");
			break;
		}
		rc = auda2dp_in_enable(audio);
		if (!rc) {
			/* Wait up to 1s for the encoder to report running;
			 * withdraw the frequency request on failure. */
			rc = wait_event_interruptible_timeout(audio->wait_enable,
					audio->running != 0, 1*HZ);
			MM_DBG("state %d rc = %d\n", audio->running, rc);
			if (audio->running == 0) {
				rc = -ENODEV;
				msm_snddev_withdraw_freq(audio->enc_id,
						SNDDEV_CAP_TX, AUDDEV_CLNT_ENC);
				MM_DBG("msm_snddev_withdraw_freq\n");
			} else
				rc = 0;
		}
		break;
	}
	case AUDIO_STOP: {
		/* NOTE(review): rc from auda2dp_in_disable() is overwritten
		 * by the withdraw_freq result — confirm that is intended. */
		rc = auda2dp_in_disable(audio);
		rc = msm_snddev_withdraw_freq(audio->enc_id,
				SNDDEV_CAP_TX, AUDDEV_CLNT_ENC);
		MM_DBG("msm_snddev_withdraw_freq\n");
		audio->stopped = 1;
		audio->abort = 0;
		break;
	}
	case AUDIO_FLUSH: {
		if (audio->stopped) {
			/* Make sure we're stopped and we wake any threads
			 * that might be blocked holding the read_lock.
			 * While audio->stopped read threads will always
			 * exit immediately.
			 */
			wake_up(&audio->wait);
			mutex_lock(&audio->read_lock);
			auda2dp_in_flush(audio);
			mutex_unlock(&audio->read_lock);
		}
		break;
	}
	case AUDIO_SET_STREAM_CONFIG: {
		struct msm_audio_stream_config cfg;
		if (copy_from_user(&cfg, (void *) arg, sizeof(cfg))) {
			rc = -EFAULT;
			break;
		}
		/* Allow only single frame */
		if ((audio->enc_type == ENC_TYPE_SBC) &&
				(cfg.buffer_size != FRAME_SIZE_SBC))
			rc = -EINVAL;
		else
			audio->buffer_size = cfg.buffer_size;
		break;
	}
	case AUDIO_GET_STREAM_CONFIG: {
		struct msm_audio_stream_config cfg;
		memset(&cfg, 0, sizeof(cfg));
		if (audio->enc_type == ENC_TYPE_SBC)
			cfg.buffer_size = FRAME_SIZE_SBC;
		else
			cfg.buffer_size = MONO_DATA_SIZE;
		cfg.buffer_count = FRAME_NUM;
		if (copy_to_user((void *) arg, &cfg, sizeof(cfg)))
			rc = -EFAULT;
		break;
	}
	case AUDIO_SET_SBC_ENC_CONFIG: {
		if (copy_from_user(&audio->cfg, (void *) arg,
				sizeof(audio->cfg))) {
			rc = -EFAULT;
			break;
		}
		/* Mirror the SBC config into the generic fields. */
		audio->samp_rate = audio->cfg.sample_rate;
		audio->channel_mode = audio->cfg.channels;
		audio->enc_type = ENC_TYPE_SBC;
		break;
	}
	case AUDIO_SET_CONFIG: {
		struct msm_audio_config cfg;
		if (copy_from_user(&cfg, (void *) arg, sizeof(cfg))) {
			rc = -EFAULT;
			break;
		}
		/* Only mono/stereo PCM capture is supported. */
		if (cfg.channel_count == 1) {
			cfg.channel_count = AUDREC_CMD_MODE_MONO;
			audio->buffer_size = MONO_DATA_SIZE;
		} else if (cfg.channel_count == 2) {
			cfg.channel_count = AUDREC_CMD_MODE_STEREO;
			audio->buffer_size = STEREO_DATA_SIZE;
		} else {
			rc = -EINVAL;
			break;
		}
		audio->samp_rate = cfg.sample_rate;
		audio->channel_mode = cfg.channel_count;
		audio->enc_type = ENC_TYPE_WAV;
		break;
	}
	case AUDIO_GET_SBC_ENC_CONFIG: {
		struct msm_audio_sbc_enc_config cfg;
		memset(&cfg, 0, sizeof(cfg));
		cfg.bit_allocation = audio->cfg.bit_allocation;
		cfg.mode = audio->cfg.mode;
		cfg.number_of_subbands = audio->cfg.number_of_subbands;
		cfg.number_of_blocks = audio->cfg.number_of_blocks;
		cfg.sample_rate = audio->samp_rate;
		cfg.channels = audio->channel_mode;
		cfg.bit_rate = audio->cfg.bit_rate;
		if (copy_to_user((void *) arg, &cfg, sizeof(cfg)))
			rc = -EFAULT;
		break;
	}
	case AUDIO_GET_CONFIG: {
		struct msm_audio_config cfg;
		memset(&cfg, 0, sizeof(cfg));
		cfg.buffer_count = FRAME_NUM;
		cfg.sample_rate = audio->samp_rate;
		if (audio->channel_mode == AUDREC_CMD_MODE_MONO) {
			cfg.channel_count = 1;
			cfg.buffer_size = MONO_DATA_SIZE;
		} else {
			cfg.channel_count = 2;
			cfg.buffer_size = STEREO_DATA_SIZE;
		}
		cfg.type = ENC_TYPE_WAV;
		if (copy_to_user((void *) arg, &cfg, sizeof(cfg)))
			rc = -EFAULT;
		break;
	}
	case AUDIO_GET_SESSION_ID: {
		/* NOTE(review): copies sizeof(unsigned short) from enc_id —
		 * confirm enc_id really is an unsigned short. */
		if (copy_to_user((void *) arg, &audio->enc_id,
				sizeof(unsigned short))) {
			rc = -EFAULT;
		}
		break;
	}
	default:
		rc = -EINVAL;
	}
	mutex_unlock(&audio->lock);
	return rc;
}
/*
 * Add a request and tell smbiod to process it
 *
 * Assigns a mid, tries to transmit the request inline when the connection
 * is valid and the transmit queue is empty, otherwise queues it for smbiod.
 * Then blocks (interruptibly, 30s max) until SMB_REQ_RECEIVED is set.
 * Returns req->rq_errno: 0 on success, -EINTR if the server lock wait was
 * interrupted, -ERESTARTSYS on signal, or a mapped SMB error on failure
 * (ERRtimeout is stamped into the request on timeout).
 */
int smb_add_request(struct smb_request *req)
{
	long timeleft;
	struct smb_sb_info *server = req->rq_server;
	int result = 0;

	smb_setup_request(req);
	if (req->rq_trans2_command) {
		/* trans2 replies land in rq_buffer; refuse without one */
		if (req->rq_buffer == NULL) {
			PARANOIA("trans2 attempted without response buffer!\n");
			return -EIO;
		}
		result = smb_setup_trans2request(req);
	}
	if (result < 0)
		return result;

#ifdef SMB_DEBUG_PACKET_SIZE
	add_xmit_stats(req);
#endif

	/* add 'req' to the queue of requests */
	if (smb_lock_server_interruptible(server))
		return -EINTR;

	/*
	 * Try to send the request as the process. If that fails we queue the
	 * request and let smbiod send it later.
	 */

	/* FIXME: each server has a number on the maximum number of parallel
	   requests. 10, 50 or so. We should not allow more requests to be
	   active. */
	if (server->mid > 0xf000)
		server->mid = 0;	/* wrap the 16-bit multiplex id */
	req->rq_mid = server->mid++;
	WSET(req->rq_header, smb_mid, req->rq_mid);

	result = 0;
	if (server->state == CONN_VALID) {
		/* inline send only when nothing is already queued ahead */
		if (list_empty(&server->xmitq))
			result = smb_request_send_req(req);
		if (result < 0) {
			/* Connection lost? */
			server->conn_error = result;
			server->state = CONN_INVALID;
		}
	}
	/* result == 1 means fully transmitted; anything else: let smbiod retry */
	if (result != 1)
		list_add_tail(&req->rq_queue, &server->xmitq);
	smb_rget(req);

	if (server->state != CONN_VALID)
		smbiod_retry(server);

	smb_unlock_server(server);

	smbiod_wake_up();

	timeleft = wait_event_interruptible_timeout(req->rq_wait,
				req->rq_flags & SMB_REQ_RECEIVED, 30*HZ);
	if (!timeleft || signal_pending(current)) {
		/*
		 * On timeout or on interrupt we want to try and remove the
		 * request from the recvq/xmitq.
		 */
		smb_lock_server(server);
		if (!(req->rq_flags & SMB_REQ_RECEIVED)) {
			list_del_init(&req->rq_queue);
			smb_rput(req);
		}
		smb_unlock_server(server);
	}

	if (!timeleft) {
		/* dump the header for post-mortem debugging */
		PARANOIA("request [%p, mid=%d] timed out!\n", req, req->rq_mid);
		VERBOSE("smb_com:  %02x\n", *(req->rq_header + smb_com));
		VERBOSE("smb_rcls: %02x\n", *(req->rq_header + smb_rcls));
		VERBOSE("smb_flg:  %02x\n", *(req->rq_header + smb_flg));
		VERBOSE("smb_tid:  %04x\n", WVAL(req->rq_header, smb_tid));
		VERBOSE("smb_pid:  %04x\n", WVAL(req->rq_header, smb_pid));
		VERBOSE("smb_uid:  %04x\n", WVAL(req->rq_header, smb_uid));
		VERBOSE("smb_mid:  %04x\n", WVAL(req->rq_header, smb_mid));
		VERBOSE("smb_wct:  %02x\n", *(req->rq_header + smb_wct));

		req->rq_rcls = ERRSRV;
		req->rq_err = ERRtimeout;

		/* Just in case it was "stuck" */
		smbiod_wake_up();
	}
	VERBOSE("woke up, rcls=%d\n", req->rq_rcls);

	if (req->rq_rcls != 0)
		req->rq_errno = smb_errno(req);
	if (signal_pending(current))
		req->rq_errno = -ERESTARTSYS;
	return req->rq_errno;
}
/*
 * ath6kl_cfg80211_connect - cfg80211 .connect handler.
 *
 * Serializes against other commands via ar->sem, drains the WMI control
 * endpoint, then either issues a fast reconnect (same SSID while already
 * connected) or programs SSID/BSSID/crypto state and sends a WMI connect
 * command. Returns 0 on success or a negative errno.
 *
 * NOTE(review): ar->sme_state is set to SME_CONNECTING before the
 * ath6kl_cfg80211_ready() check, so early-error returns leave that state
 * behind — confirm callers tolerate this.
 */
static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
				   struct cfg80211_connect_params *sme)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	int status;

	ar->sme_state = SME_CONNECTING;

	if (!ath6kl_cfg80211_ready(ar))
		return -EIO;

	if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
		ath6kl_err("destroy in progress\n");
		return -EBUSY;
	}

	/* With scanning skipped, a concrete channel and BSSID are mandatory. */
	if (test_bit(SKIP_SCAN, &ar->flag) &&
	    ((sme->channel && sme->channel->center_freq == 0) ||
	     (sme->bssid && is_zero_ether_addr(sme->bssid)))) {
		ath6kl_err("SkipScan: channel or bssid invalid\n");
		return -EINVAL;
	}

	if (down_interruptible(&ar->sem)) {
		ath6kl_err("busy, couldn't get access\n");
		return -ERESTARTSYS;
	}

	if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
		ath6kl_err("busy, destroy in progress\n");
		up(&ar->sem);
		return -EBUSY;
	}

	if (ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)]) {
		/*
		 * sleep until the command queue drains
		 */
		wait_event_interruptible_timeout(ar->event_wq,
			ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)] == 0,
			WMI_TIMEOUT);
		if (signal_pending(current)) {
			ath6kl_err("cmd queue drain timeout\n");
			up(&ar->sem);
			return -EINTR;
		}
	}

	/* Same SSID while connected: fast path via WMI reconnect. */
	if (test_bit(CONNECTED, &ar->flag) &&
	    ar->ssid_len == sme->ssid_len &&
	    !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) {
		ar->reconnect_flag = true;
		status = ath6kl_wmi_reconnect_cmd(ar->wmi, ar->req_bssid,
						  ar->ch_hint);

		up(&ar->sem);
		if (status) {
			ath6kl_err("wmi_reconnect_cmd failed\n");
			return -EIO;
		}
		return 0;
	} else if (ar->ssid_len == sme->ssid_len &&
		   !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) {
		/* Same SSID but not connected: tear down any stale state. */
		ath6kl_disconnect(ar);
	}

	memset(ar->ssid, 0, sizeof(ar->ssid));
	ar->ssid_len = sme->ssid_len;
	memcpy(ar->ssid, sme->ssid, sme->ssid_len);

	if (sme->channel)
		ar->ch_hint = sme->channel->center_freq;

	memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
	if (sme->bssid && !is_broadcast_ether_addr(sme->bssid))
		memcpy(ar->req_bssid, sme->bssid, sizeof(ar->req_bssid));

	ath6kl_set_wpa_version(ar, sme->crypto.wpa_versions);

	status = ath6kl_set_auth_type(ar, sme->auth_type);
	if (status) {
		up(&ar->sem);
		return status;
	}

	if (sme->crypto.n_ciphers_pairwise)
		ath6kl_set_cipher(ar, sme->crypto.ciphers_pairwise[0], true);
	else
		ath6kl_set_cipher(ar, 0, true);

	ath6kl_set_cipher(ar, sme->crypto.cipher_group, false);

	if (sme->crypto.n_akm_suites)
		ath6kl_set_key_mgmt(ar, sme->crypto.akm_suites[0]);

	/* Static WEP key supplied with open auth: install it now. */
	if ((sme->key_len) && (ar->auth_mode == NONE_AUTH) &&
	    (ar->prwise_crypto == WEP_CRYPT)) {
		struct ath6kl_key *key = NULL;

		if (sme->key_idx < WMI_MIN_KEY_INDEX ||
		    sme->key_idx > WMI_MAX_KEY_INDEX) {
			ath6kl_err("key index %d out of bounds\n",
				   sme->key_idx);
			up(&ar->sem);
			return -ENOENT;
		}

		key = &ar->keys[sme->key_idx];
		key->key_len = sme->key_len;
		/* NOTE(review): sme->key_len is not checked against
		 * sizeof(key->key) before this memcpy — confirm an upper
		 * bound is enforced elsewhere. */
		memcpy(key->key, sme->key, key->key_len);
		key->cipher = ar->prwise_crypto;
		ar->def_txkey_index = sme->key_idx;

		ath6kl_wmi_addkey_cmd(ar->wmi, sme->key_idx,
				      ar->prwise_crypto,
				      GROUP_USAGE | TX_USAGE,
				      key->key_len, NULL,
				      key->key, KEY_OP_INIT_VAL, NULL,
				      NO_SYNC_WMIFLAG);
	}

	if (!ar->usr_bss_filter) {
		clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
		if (ath6kl_wmi_bssfilter_cmd(ar->wmi, ALL_BSS_FILTER, 0) != 0) {
			ath6kl_err("couldn't set bss filtering\n");
			up(&ar->sem);
			return -EIO;
		}
	}

	ar->nw_type = ar->next_mode;

	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
		   "%s: connect called with authmode %d dot11 auth %d"
		   " PW crypto %d PW crypto len %d GRP crypto %d"
		   " GRP crypto len %d channel hint %u\n",
		   __func__,
		   ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto,
		   ar->prwise_crypto_len, ar->grp_crypto,
		   ar->grp_crypto_len, ar->ch_hint);

	ar->reconnect_flag = 0;
	status = ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type,
					ar->dot11_auth_mode, ar->auth_mode,
					ar->prwise_crypto,
					ar->prwise_crypto_len,
					ar->grp_crypto, ar->grp_crypto_len,
					ar->ssid_len, ar->ssid,
					ar->req_bssid, ar->ch_hint,
					ar->connect_ctrl_flags);

	up(&ar->sem);

	if (status == -EINVAL) {
		/* Firmware rejected the parameters: clear the cached SSID. */
		memset(ar->ssid, 0, sizeof(ar->ssid));
		ar->ssid_len = 0;
		ath6kl_err("invalid request\n");
		return -ENOENT;
	} else if (status) {
		ath6kl_err("ath6kl_wmi_connect_cmd failed\n");
		return -EIO;
	}

	/* Arm the disconnect timer for host-side WPA PSK handshakes. */
	if ((!(ar->connect_ctrl_flags & CONNECT_DO_WPA_OFFLOAD)) &&
	    ((ar->auth_mode == WPA_PSK_AUTH) ||
	     (ar->auth_mode == WPA2_PSK_AUTH))) {
		mod_timer(&ar->disconnect_timer,
			  jiffies + msecs_to_jiffies(DISCON_TIMER_INTVAL));
	}

	ar->connect_ctrl_flags &= ~CONNECT_DO_WPA_OFFLOAD;
	set_bit(CONNECT_PEND, &ar->flag);

	return 0;
}
/*
 * jpeg_enc_ioctl - ioctl dispatcher for the hardware JPEG encoder.
 *
 * Per-fd ownership is tracked through file->private_data (*pStatus):
 * JPEG_ENC_IOCTL_INIT claims the encoder by setting *pStatus to
 * JPEG_ENC_PROCESS; CONFIG/START/WAIT/DEINIT refuse callers whose fd
 * does not hold that mark. 'enc_status' and '_jpeg_enc_int_status' are
 * globals defined elsewhere in this driver.
 *
 * Returns 0 on success, -EFAULT on permission/copy/hardware-setup
 * failures (and the raw jpeg_drv_enc_init() result for INIT).
 */
static int jpeg_enc_ioctl(unsigned int cmd, unsigned long arg, struct file *file)
{
	int retValue;
	long timeout_jiff;
	unsigned int file_size, enc_result_code;
	unsigned int jpeg_enc_wait_timeout = 0;
	unsigned int cycle_count;
	unsigned int ret;
	unsigned int *pStatus;
	JPEG_ENC_DRV_IN cfgEnc;		/* user-supplied encode parameters */
	JpegDrvEncResult enc_result;	/* user pointers for WAIT results */

	pStatus = (unsigned int*)file->private_data;

	if (NULL == pStatus) {
		JPEG_WRN("Private data is null in flush operation. HOW COULD THIS HAPPEN ??\n");
		return -EFAULT;
	}

	switch (cmd) {
	case JPEG_ENC_IOCTL_RW_REG:
		/* register read/write path currently disabled */
		break;

	/* initial and reset JPEG encoder */
	case JPEG_ENC_IOCTL_INIT:
		JPEG_MSG("[JPEGDRV][IOCTL] JPEG Encoder Init!!\n");
		retValue = jpeg_drv_enc_init();
		if (retValue == 0) {
			/* this fd now owns the encoder */
			*pStatus = JPEG_ENC_PROCESS;
		}
		return retValue;
		break;

	case JPEG_ENC_IOCTL_WARM_RESET:
		JPEG_MSG("[JPEGDRV][IOCTL] JPEG Encoder Warm Reset\n");
		enc_result_code = jpeg_drv_enc_warm_reset();
		/* 0 means the warm reset failed */
		if (0 == enc_result_code) {
			return -EFAULT;
		}
		break;

	/* configure the register */
	case JPEG_ENC_IOCTL_CONFIG:
		JPEG_MSG("[JPEGDRV][IOCTL] JPEG Encoder Configure Hardware\n");
		if (*pStatus != JPEG_ENC_PROCESS) {
			JPEG_WRN("Permission Denied! This process can not access encoder");
			return -EFAULT;
		}
		if (enc_status == 0) {
			JPEG_WRN("Encoder status is available, HOW COULD THIS HAPPEN ??");
			*pStatus = 0;
			return -EFAULT;
		}

		/* copy input parameters */
		if (copy_from_user(&cfgEnc, (void *)arg, sizeof(JPEG_ENC_DRV_IN))) {
			JPEG_MSG("[JPEGDRV]JPEG Encoder : Copy from user error\n");
			return -EFAULT;
		}

		/* 0. reset */
		jpeg_drv_enc_reset();

		/* 1. set src config */
		JPEG_MSG("[JPEGDRV]SRC_IMG: %x %x, DU:%x, fmt:%x!!\n",
			cfgEnc.encWidth, cfgEnc.encHeight,
			cfgEnc.totalEncDU, cfgEnc.encFormat);
		ret = jpeg_drv_enc_set_src_image(cfgEnc.encWidth,
			cfgEnc.encHeight, cfgEnc.encFormat, cfgEnc.totalEncDU);
		if (ret == 0) {
			JPEG_MSG("[JPEGDRV]JPEG Encoder set srouce image failed\n");
			return -EFAULT;
		}

		/* 2. set src buffer info */
		JPEG_MSG("[JPEGDRV]SRC_BUF: addr %x, %x, stride %x, %x!!\n",
			cfgEnc.srcBufferAddr, cfgEnc.srcChromaAddr,
			cfgEnc.imgStride, cfgEnc.memStride);
		ret = jpeg_drv_enc_set_src_buf(cfgEnc.encFormat,
			cfgEnc.imgStride, cfgEnc.memStride,
			cfgEnc.srcBufferAddr, cfgEnc.srcChromaAddr);
		if (ret == 0) {
			JPEG_MSG("[JPEGDRV]JPEG Encoder set srouce buffer failed\n");
			return -EFAULT;
		}

		/* 3. set dst buffer info */
		JPEG_MSG("[JPEGDRV]DST_BUF: addr:%x, size:%x, ofs:%x, mask:%x!!\n",
			cfgEnc.dstBufferAddr, cfgEnc.dstBufferSize,
			cfgEnc.dstBufAddrOffset, cfgEnc.dstBufAddrOffsetMask);
		ret = jpeg_drv_enc_set_dst_buff(cfgEnc.dstBufferAddr,
			cfgEnc.dstBufferSize, cfgEnc.dstBufAddrOffset,
			cfgEnc.dstBufAddrOffsetMask);
		if (ret == 0) {
			JPEG_MSG("[JPEGDRV]JPEG Encoder set dst buffer failed\n");
			return -EFAULT;
		}

		/* 4 .set ctrl config */
		JPEG_MSG("[JPEGDRV]ENC_CFG: exif:%d, q:%d, DRI:%d !!\n",
			cfgEnc.enableEXIF, cfgEnc.encQuality, cfgEnc.restartInterval);
		jpeg_drv_enc_ctrl_cfg(cfgEnc.enableEXIF, cfgEnc.encQuality,
			cfgEnc.restartInterval);
		break;

	case JPEG_ENC_IOCTL_START:
		JPEG_MSG("[JPEGDRV][IOCTL] JPEG Encoder Start!!\n");
		if (*pStatus != JPEG_ENC_PROCESS) {
			JPEG_WRN("Permission Denied! This process can not access encoder");
			return -EFAULT;
		}
		if (enc_status == 0) {
			JPEG_WRN("Encoder status is available, HOW COULD THIS HAPPEN ??");
			*pStatus = 0;
			return -EFAULT;
		}
		jpeg_drv_enc_start();
		break;

	case JPEG_ENC_IOCTL_WAIT:
		JPEG_MSG("[JPEGDRV][IOCTL] JPEG Encoder Wait!!\n");
		if (*pStatus != JPEG_ENC_PROCESS) {
			JPEG_WRN("Permission Denied! This process can not access encoder");
			return -EFAULT;
		}
		if (enc_status == 0) {
			JPEG_WRN("Encoder status is available, HOW COULD THIS HAPPEN ??");
			*pStatus = 0;
			return -EFAULT;
		}
		if (copy_from_user(&enc_result, (void *)arg, sizeof(JpegDrvEncResult))) {
			JPEG_WRN("JPEG Encoder : Copy from user error\n");
			return -EFAULT;
		}

		/* TODO: ENC_DONE in REG_JPEG_ENC_INTERRUPT_STATUS need to be
		 * set to 0 after read. */
		jpeg_enc_wait_timeout = 0xFFFFFF;

#ifdef FPGA_VERSION
		/* FPGA bring-up: busy-poll the interrupt status register */
		do {
			_jpeg_enc_int_status = REG_JPEG_ENC_INTERRUPT_STATUS;
			jpeg_enc_wait_timeout--;
		} while (_jpeg_enc_int_status == 0 && jpeg_enc_wait_timeout > 0);

		if (jpeg_enc_wait_timeout == 0)
			JPEG_MSG("JPEG Encoder timeout\n");

		ret = jpeg_drv_enc_get_result(&file_size);
		JPEG_MSG("Result : %d, Size : %u, addres : 0x%x\n", ret,
			file_size, ioread32(JPEG_ENC_BASE + 0x120));
		if (_jpeg_enc_int_status != 1) {
			jpeg_drv_enc_dump_reg();
		}
#else
		/* set timeout (user-supplied, in milliseconds) */
		timeout_jiff = enc_result.timeout* HZ / 1000;
		JPEG_MSG("[JPEGDRV]JPEG Encoder Time Jiffies : %ld\n", timeout_jiff);
		if (jpeg_isr_enc_lisr() < 0) {
			/* not done yet: block until the ISR flags completion */
			wait_event_interruptible_timeout(enc_wait_queue,
				_jpeg_enc_int_status, timeout_jiff);
			JPEG_MSG("[JPEGDRV]JPEG Encoder Wait done !!\n");
		} else {
			JPEG_MSG("[JPEGDRV]JPEG Encoder already done !!\n");
		}
		ret = jpeg_drv_enc_get_result(&file_size);
		JPEG_MSG("[JPEGDRV]Result : %d, Size : %u!!\n", ret, file_size);
		if (ret != 0) {
			jpeg_drv_enc_dump_reg();
		}
#endif
		cycle_count = jpeg_drv_enc_get_cycle_count();

		/* hand results back through the user-supplied pointers */
		if (copy_to_user(enc_result.pFileSize, &file_size, sizeof(unsigned int))) {
			JPEG_MSG("[JPEGDRV]JPEG Encoder : Copy to user error (file size)\n");
			return -EFAULT;
		}
		if (copy_to_user(enc_result.pResult, &ret, sizeof(unsigned int))) {
			JPEG_MSG("[JPEGDRV]JPEG Encoder : Copy to user error (status)\n");
			return -EFAULT;
		}
		if (copy_to_user(enc_result.pCycleCount, &cycle_count, sizeof(unsigned int))) {
			JPEG_MSG("[JPEGDRV]JPEG Encoder : Copy to user error (cycle)\n");
			return -EFAULT;
		}
		break;

	case JPEG_ENC_IOCTL_DEINIT:
		JPEG_MSG("[JPEGDRV][IOCTL] JPEG Encoder Deinit!!\n");
		/* copy input parameters */
		if (*pStatus != JPEG_ENC_PROCESS) {
			JPEG_WRN("Permission Denied! This process can not access encoder");
			return -EFAULT;
		}
		if (enc_status == 0) {
			JPEG_WRN("Encoder status is available, HOW COULD THIS HAPPEN ??");
			*pStatus = 0;
			return -EFAULT;
		}
		jpeg_drv_enc_deinit();
		*pStatus = 0;	/* release ownership */
		break;

	case JPEG_ENC_IOCTL_DUMP_REG:
		jpeg_drv_enc_dump_reg();
		break;

	default:
		JPEG_MSG("[JPEGDRV]JPEG ENC IOCTL NO THIS COMMAND\n");
	}
	return 0;
}
/**
 * rpmsg_send_offchannel_raw() - send a message across to the remote processor
 * @rpdev: the rpmsg channel
 * @src: source address
 * @dst: destination address
 * @data: payload of message
 * @len: length of payload
 * @wait: indicates whether caller should block in case no TX buffers available
 *
 * This function is the base implementation for all of the rpmsg sending API.
 *
 * It will send @data of length @len to @dst, and say it's from @src. The
 * message will be sent to the remote processor which the @rpdev channel
 * belongs to.
 *
 * The message is sent using one of the TX buffers that are available for
 * communication with this remote processor.
 *
 * If @wait is true, the caller will be blocked until either a TX buffer is
 * available, or 15 seconds elapses (we don't want callers to
 * sleep indefinitely due to misbehaving remote processors), and in that
 * case -ERESTARTSYS is returned. The number '15' itself was picked
 * arbitrarily; there's little point in asking drivers to provide a timeout
 * value themselves.
 *
 * Otherwise, if @wait is false, and there are no TX buffers available,
 * the function will immediately fail, and -ENOMEM will be returned.
 *
 * Normally drivers shouldn't use this function directly; instead, drivers
 * should use the appropriate rpmsg_{try}send{to, _offchannel} API
 * (see include/linux/rpmsg.h).
 *
 * Returns 0 on success and an appropriate error value on failure.
 */
int rpmsg_send_offchannel_raw(struct rpmsg_channel *rpdev, u32 src, u32 dst,
					void *data, int len, bool wait)
{
	struct virtproc_info *vrp = rpdev->vrp;
	struct device *dev = &rpdev->dev;
	struct scatterlist sg;
	struct rpmsg_hdr *msg;
	int err;

	/* bcasting isn't allowed */
	if (src == RPMSG_ADDR_ANY || dst == RPMSG_ADDR_ANY) {
		dev_err(dev, "invalid addr (src 0x%x, dst 0x%x)\n", src, dst);
		return -EINVAL;
	}

	/*
	 * We currently use fixed-sized buffers, and therefore the payload
	 * length is limited.
	 *
	 * One of the possible improvements here is either to support
	 * user-provided buffers (and then we can also support zero-copy
	 * messaging), or to improve the buffer allocator, to support
	 * variable-length buffer sizes.
	 */
	if (len > RPMSG_BUF_SIZE - sizeof(struct rpmsg_hdr)) {
		dev_err(dev, "message is too big (%d)\n", len);
		return -EMSGSIZE;
	}

	/* grab a buffer */
	msg = get_a_tx_buf(vrp);
	if (!msg && !wait)
		return -ENOMEM;

	/* no free buffer ? wait for one (but bail after 15 seconds) */
	while (!msg) {
		/* enable "tx-complete" interrupts, if not already enabled */
		rpmsg_upref_sleepers(vrp);

		/*
		 * sleep until a free buffer is available or 15 secs elapse.
		 * the timeout period is not configurable because there's
		 * little point in asking drivers to specify that.
		 * if later this happens to be required, it'd be easy to add.
		 *
		 * NOTE(review): a signal makes this return -ERESTARTSYS,
		 * which is non-zero, so the loop retries rather than
		 * bailing out — confirm that is intended.
		 */
		err = wait_event_interruptible_timeout(vrp->sendq,
					(msg = get_a_tx_buf(vrp)),
					msecs_to_jiffies(15000));

		/* disable "tx-complete" interrupts if we're the last sleeper */
		rpmsg_downref_sleepers(vrp);

		/* timeout ? */
		if (!err) {
			dev_err(dev, "timeout waiting for a tx buffer\n");
			return -ERESTARTSYS;
		}
	}

	/* fill the header in front of the payload */
	msg->len = len;
	msg->flags = 0;
	msg->src = src;
	msg->dst = dst;
	msg->reserved = 0;
	memcpy(msg->data, data, len);

	dev_dbg(dev, "TX From 0x%x, To 0x%x, Len %d, Flags %d, Reserved %d\n",
					msg->src, msg->dst, msg->len,
					msg->flags, msg->reserved);
#if defined(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
	print_hex_dump_debug("rpmsg_virtio TX: ", DUMP_PREFIX_NONE, 16, 1,
			 msg, sizeof(*msg) + msg->len, true);
#endif

	sg_init_one(&sg, msg, sizeof(*msg) + len);

	mutex_lock(&vrp->tx_lock);

	/* add message to the remote processor's virtqueue */
	err = virtqueue_add_outbuf(vrp->svq, &sg, 1, msg, GFP_KERNEL);
	if (err) {
		/*
		 * need to reclaim the buffer here, otherwise it's lost
		 * (memory won't leak, but rpmsg won't use it again for TX).
		 * this will wait for a buffer management overhaul.
		 */
		dev_err(dev, "virtqueue_add_outbuf failed: %d\n", err);
		goto out;
	}

	/* tell the remote processor it has a pending message to read */
	virtqueue_kick(vrp->svq);
out:
	mutex_unlock(&vrp->tx_lock);
	return err;
}
/*
 * sleepy_write - write() handler: sleep for the requested number of seconds.
 *
 * Expects exactly 4 bytes (a signed int, in seconds). The process sleeps on
 * dev->wqueue until either the duration expires or a reader sets
 * dev->wakeytime to 1. Returns the elapsed seconds when woken by a read,
 * 0 on a plain timeout or a negative duration, -EINVAL on a bad count,
 * -EFAULT on a bad user pointer, and -EINTR when killed while waiting for
 * the mutex.
 *
 * Fixes vs. the previous version:
 *  - printk used %zd (ssize_t) for an unsigned long, a format mismatch;
 *    now %lu per Documentation/core-api/printk-formats.rst.
 *  - 'remaining_seconds = i - elapsed' could wrap to a huge unsigned value
 *    when rounding made elapsed > i; now clamped to 0.
 *  - sleep_jiffies was computed but never used; it now feeds the wait.
 */
ssize_t sleepy_write(struct file *filp, const char __user *buf, size_t count,
		     loff_t *f_pos)
{
	struct sleepy_dev *dev = (struct sleepy_dev *)filp->private_data;
	ssize_t retval = 0;
	int minor, i = 0;
	unsigned long start, end, elapsed, remaining_seconds, sleep_jiffies;

	if (mutex_lock_killable(&dev->sleepy_mutex))
		return -EINTR;

	/* Exactly one 4-byte integer must be written. */
	if (count != 4) {
		mutex_unlock(&dev->sleepy_mutex);
		return -EINVAL;
	}

	/* Copy the duration (seconds) from userspace into the device. */
	if (copy_from_user(&dev->data, buf, count)) {
		mutex_unlock(&dev->sleepy_mutex);
		return -EFAULT;
	}
	i = (int)(dev->data);

	/* Negative durations mean "don't sleep at all". */
	if (i < 0) {
		mutex_unlock(&dev->sleepy_mutex);
		return retval;
	}

	/* Convert the requested duration to jiffies. */
	sleep_jiffies = (unsigned long)i * HZ;

	/* Arm the wake-up flag so sleepers stay asleep until a read. */
	dev->wakeytime = 0;

	/* Record when the sleep started, then drop the lock to sleep. */
	start = jiffies;
	mutex_unlock(&dev->sleepy_mutex);

	/* Sleep until a reader wakes us or the timeout expires. */
	wait_event_interruptible_timeout(dev->wqueue, dev->wakeytime == 1,
					 sleep_jiffies);

	if (mutex_lock_killable(&dev->sleepy_mutex))
		return -EINTR;

	/* How long were we actually asleep, in whole seconds? */
	end = jiffies;
	elapsed = (end - start) / HZ;

	/* Clamp: rounding can make elapsed exceed i; unsigned subtraction
	 * would otherwise wrap around to a huge value. */
	remaining_seconds = (elapsed < (unsigned long)i) ?
				(unsigned long)i - elapsed : 0;

	minor = (int)iminor(filp->f_path.dentry->d_inode);
	/* %lu matches unsigned long (old code used %zd — format mismatch). */
	printk("SLEEPY_WRITE DEVICE (%d): remaining = %lu \n",
	       minor, remaining_seconds);

	/* Woken early by a read: report the time actually slept. */
	if (dev->wakeytime == 1)
		retval = elapsed;
	else
		retval = 0;

	mutex_unlock(&dev->sleepy_mutex);
	return retval;
}
/*
 * This is a RT kernel thread that handles the I2c accesses
 * The I2c access functions are expected to be able to sleep.
 *
 * Loop: read the touch report over I2C (register 0 onward), decode up to
 * MAX_TOUCHES 12-bit x/y points, feed them to ts_evt_add(), then wait on
 * sample_waitq until the ISR sets ts->bReady (with a 50 ms cap while
 * touches are active, so releases are not missed). Exits on SIGKILL.
 */
static int ts_thread(void *_ts)
{
	int ret;
	struct point points[MAX_TOUCHES];
	/* 3 header bytes + 6 bytes per touch point */
	unsigned char buf[3+(6*MAX_TOUCHES)];
	struct ft5x06_ts *ts = _ts;
	unsigned char startch[1] = { 0 };
	/* write register address 0, then read the whole report back */
	struct i2c_msg readpkt[2] = {
		{ts->client->addr, 0, 1, startch},
		{ts->client->addr, I2C_M_RD, sizeof(buf), buf}
	};
	struct task_struct *tsk = current;

	ts->rtask = tsk;

	daemonize("ft5x06tsd");
	/* only want to receive SIGKILL */
	allow_signal(SIGKILL);

	complete(&ts->init_exit);

	do {
		int buttons = 0 ;
		ts->bReady = 0;
		ret = i2c_transfer(ts->client->adapter, readpkt,
				ARRAY_SIZE(readpkt));
		if (ret != ARRAY_SIZE(readpkt)) {
			printk(KERN_WARNING "%s: i2c_transfer failed\n",
					client_name);
			msleep(1000);
		} else {
			int i;
			unsigned char *p = buf+3;
#ifdef DEBUG
			printHex(buf, sizeof(buf));
#endif
			buttons = buf[2];	/* touch count from the header */
			if (buttons > MAX_TOUCHES) {
				/* Bogus count: trust it only if the interrupt
				 * line is actually asserted (active low). */
				int interrupting = (0 == gpio_get_value(ts->gp));
				if (interrupting) {
					printk(KERN_ERR
						"%s: invalid button count 0x%02x\n",
						__func__, buttons);
				}
				/* not garbage from POR */
				buttons = interrupting ? MAX_TOUCHES : 0;
			} else {
				/* unpack 12-bit coordinates, 6 bytes/point */
				for (i = 0; i < buttons; i++) {
					points[i].x = (((p[0] & 0x0f) << 8) | p[1]) & 0x7ff;
					points[i].id = (p[2]>>4);
					points[i].y = (((p[2] & 0x0f) << 8) | p[3]) & 0x7ff;
					p += 6;
				}
			}
		}
		if (signal_pending(tsk))
			break;
#ifdef DEBUG
		/* NOTE(review): points[0] may be uninitialized here when the
		 * transfer failed (buttons == 0) — debug print only. */
		printk(KERN_ERR "%s: buttons = %d, "
				"points[0].x = %d, "
				"points[0].y = %d\n",
				client_name, buttons, points[0].x, points[0].y);
#endif
		ts_evt_add(ts, buttons, points);
		if (0 < buttons)
			/* touches held down: poll again within 50 ms even if
			 * no interrupt arrives, so releases are noticed */
			wait_event_interruptible_timeout(ts->sample_waitq,
					ts->bReady, HZ/20);
		else
			wait_event_interruptible(ts->sample_waitq, ts->bReady);
		if (signal_pending(tsk))
			break;
	} while (1);

	ts->rtask = NULL;
	complete_and_exit(&ts->init_exit, 0);
}
/*
 * Sends IOCTL request to enable the Host Sleep configuration.
 * (Previous comment said "cancel" — this function enables HS.)
 *
 * This function allocates the IOCTL request buffer, fills it
 * with requisite parameters and calls the IOCTL handler.
 *
 * Optionally deauthenticates all interfaces first (disconnect_on_suspend),
 * aborts a plain background scan, then issues a synchronous HS-enable
 * command and blocks up to 10 s for activation. Returns true on success
 * (or when HS is already active), false on command or wait failure.
 */
int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
{
	struct mwifiex_ds_hs_cfg hscfg;
	struct mwifiex_private *priv;
	int i;

	if (disconnect_on_suspend) {
		/* drop every connection before suspending */
		for (i = 0; i < adapter->priv_num; i++) {
			priv = adapter->priv[i];
			if (priv)
				mwifiex_deauthenticate(priv, NULL);
		}
	}

	priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);

	if (priv && priv->sched_scanning) {
#ifdef CONFIG_PM
		/* keep the scan running only for net-detect wowlan */
		if (priv->wdev.wiphy->wowlan_config &&
		    !priv->wdev.wiphy->wowlan_config->nd_config) {
#endif
			mwifiex_dbg(adapter, CMD, "aborting bgscan!\n");
			mwifiex_stop_bg_scan(priv);
			cfg80211_sched_scan_stopped(priv->wdev.wiphy);
#ifdef CONFIG_PM
		}
#endif
	}

	if (adapter->hs_activated) {
		mwifiex_dbg(adapter, CMD, "cmd: HS Already activated\n");
		return true;
	}

	adapter->hs_activate_wait_q_woken = false;

	memset(&hscfg, 0, sizeof(hscfg));
	hscfg.is_invoke_hostcmd = true;

	adapter->hs_enabling = true;
	/* flush queued commands so the HS command goes out promptly */
	mwifiex_cancel_all_pending_cmd(adapter);

	if (mwifiex_set_hs_params(mwifiex_get_priv(adapter,
						   MWIFIEX_BSS_ROLE_STA),
				  HostCmd_ACT_GEN_SET, MWIFIEX_SYNC_CMD,
				  &hscfg)) {
		mwifiex_dbg(adapter, ERROR,
			    "IOCTL request HS enable failed\n");
		return false;
	}

	/* wait up to 10 s for the firmware to confirm HS activation */
	if (wait_event_interruptible_timeout(adapter->hs_activate_wait_q,
					     adapter->hs_activate_wait_q_woken,
					     (10 * HZ)) <= 0) {
		mwifiex_dbg(adapter, ERROR,
			    "hs_activate_wait_q terminated\n");
		return false;
	}

	return true;
}
/*
 * BSP_disp_capture_screen - capture the current screen of display @sel
 * into the caller-supplied output framebuffer via a write-back scaler.
 *
 * Requests any free scaler, routes the display engine output into it,
 * starts a write-back pass and (on Linux) waits up to 100 ms for the
 * write-back-end interrupt. Returns 0 on success, DIS_FAIL on invalid
 * parameters, -1 on write-back timeout.
 *
 * Fix vs. the previous version: every error path reached after
 * Scaler_Request() succeeded now calls Scaler_Release(), and the timeout
 * path also resets/releases the scaler and restores the display route —
 * previously those paths leaked the requested scaler.
 */
__s32 BSP_disp_capture_screen(__u32 sel, __disp_capture_screen_para_t * para)
{
	__scal_buf_addr_t in_addr;
	__scal_buf_addr_t out_addr;
	__scal_src_size_t in_size;
	__scal_out_size_t out_size;
	__scal_src_type_t in_type;
	__scal_out_type_t out_type;
	__scal_scan_mod_t in_scan;
	__scal_scan_mod_t out_scan;
	__u32 size = 0;
	__s32 scaler_idx = 0;
	__s32 ret = 0;

	if(para==NULL) {
		DE_WRN("input parameter can't be null!\n");
		return DIS_FAIL;
	}

	scaler_idx = Scaler_Request(0xff);	/* 0xff: any free scaler */
	if(scaler_idx < 0) {
		DE_WRN("request scaler fail in BSP_disp_capture_screen\n");
		return DIS_FAIL;
	} else {
		/* NOTE(review): indexes gdisp.scaler by 'sel', not
		 * 'scaler_idx' — looks suspicious, confirm before changing. */
		gdisp.scaler[sel].screen_index = 0xff;
	}

	/* input is always the BE output: interleaved ARGB8888 */
	in_type.mod= Scaler_sw_para_to_reg(1,DISP_MOD_INTERLEAVED);
	in_type.fmt= Scaler_sw_para_to_reg(0,DISP_FORMAT_ARGB8888);
	in_type.ps= Scaler_sw_para_to_reg(2,DISP_SEQ_ARGB);
	in_type.byte_seq = 0;
	in_type.sample_method = 0;

	/* validate/translate the requested output format */
	if(get_fb_type(para->output_fb.format) == DISP_FB_TYPE_YUV) {
		if(para->output_fb.mode == DISP_MOD_NON_MB_PLANAR) {
			out_type.fmt = Scaler_sw_para_to_reg(3, para->output_fb.format);
		} else {
			DE_WRN("output mode:%d invalid in Display_Scaler_Start\n",para->output_fb.mode);
			Scaler_Release(scaler_idx, FALSE);	/* was leaked */
			return DIS_FAIL;
		}
	} else {
		if(para->output_fb.mode == DISP_MOD_NON_MB_PLANAR
		    && (para->output_fb.format == DISP_FORMAT_RGB888
		        || para->output_fb.format == DISP_FORMAT_ARGB8888)) {
			out_type.fmt = DE_SCAL_OUTPRGB888;
		} else if(para->output_fb.mode == DISP_MOD_INTERLEAVED
		    && para->output_fb.format == DISP_FORMAT_ARGB8888) {
			out_type.fmt = DE_SCAL_OUTI0RGB888;
		} else {
			DE_WRN("output para invalid in Display_Scaler_Start,mode:%d,format:%d\n",para->output_fb.mode, para->output_fb.format);
			Scaler_Release(scaler_idx, FALSE);	/* was leaked */
			return DIS_FAIL;
		}
		para->output_fb.br_swap= FALSE;
	}
	out_type.byte_seq = Scaler_sw_para_to_reg(2,para->output_fb.seq);

	out_size.width = para->output_fb.size.width;
	out_size.height = para->output_fb.size.height;

	/* source size: live screen when a device is attached, else the
	 * caller-provided screen_size */
	if(BSP_disp_get_output_type(sel) != DISP_OUTPUT_TYPE_NONE) {
		in_size.src_width = BSP_disp_get_screen_width(sel);
		in_size.src_height = BSP_disp_get_screen_height(sel);
		in_size.x_off = 0;
		in_size.y_off = 0;
		in_size.scal_width= BSP_disp_get_screen_width(sel);
		in_size.scal_height= BSP_disp_get_screen_height(sel);
	} else {
		in_size.src_width = para->screen_size.width;
		in_size.src_height= para->screen_size.height;
		in_size.x_off = 0;
		in_size.y_off = 0;
		in_size.scal_width= para->screen_size.width;
		in_size.scal_height= para->screen_size.height;
	}

	in_scan.field = FALSE;
	in_scan.bottom = FALSE;
	out_scan.field = FALSE;	//when use scaler as writeback, won't be outinterlaced for any display device
	out_scan.bottom = FALSE;

	/* input comes straight from the display engine, not memory */
	in_addr.ch0_addr = 0;
	in_addr.ch1_addr = 0;
	in_addr.ch2_addr = 0;

	out_addr.ch0_addr = (__u32)OSAL_VAtoPA((void*)(para->output_fb.addr[0]));
	out_addr.ch1_addr = (__u32)OSAL_VAtoPA((void*)(para->output_fb.addr[1]));
	out_addr.ch2_addr = (__u32)OSAL_VAtoPA((void*)(para->output_fb.addr[2]));

	/* flush the destination so the CPU sees the DMA-written pixels */
	size = (para->output_fb.size.width * para->output_fb.size.height * de_format_to_bpp(para->output_fb.format) + 7)/8;
	OSAL_CacheRangeFlush((void *)para->output_fb.addr[0],size ,CACHE_FLUSH_D_CACHE_REGION);

	if(BSP_disp_get_output_type(sel) == DISP_OUTPUT_TYPE_NONE) {
		/* no device attached: open the image path just for capture */
		DE_SCAL_Input_Select(scaler_idx, 6 + sel);
		DE_BE_set_display_size(sel, para->screen_size.width, para->screen_size.height);
		DE_BE_Output_Select(sel, 6 + scaler_idx);
		image_clk_on(sel);
		Image_open(sel);
		DE_BE_Cfg_Ready(sel);
	} else {
		DE_SCAL_Input_Select(scaler_idx, 4 + sel);
		DE_BE_Output_Select(sel, 2 + (scaler_idx * 2) + sel);
	}

	/* program the scaler write-back pass */
	DE_SCAL_Config_Src(scaler_idx,&in_addr,&in_size,&in_type,FALSE,FALSE);
	DE_SCAL_Set_Scaling_Factor(scaler_idx, &in_scan, &in_size, &in_type, &out_scan, &out_size, &out_type);
	DE_SCAL_Set_Init_Phase(scaler_idx, &in_scan, &in_size, &in_type, &out_scan, &out_size, &out_type, FALSE);
	DE_SCAL_Set_CSC_Coef(scaler_idx, DISP_BT601, para->output_fb.cs_mode, DISP_FB_TYPE_RGB, get_fb_type(para->output_fb.format), 0, 0);
	DE_SCAL_Set_Scaling_Coef(scaler_idx, &in_scan, &in_size, &in_type, &out_scan, &out_size, &out_type, DISP_VIDEO_NATUAL);
	DE_SCAL_Set_Out_Format(scaler_idx, &out_type);
	DE_SCAL_Set_Out_Size(scaler_idx, &out_scan,&out_type, &out_size);
	DE_SCAL_Set_Writeback_Addr(scaler_idx,&out_addr);
	DE_SCAL_Output_Select(scaler_idx, 3);
	DE_SCAL_ClearINT(scaler_idx,DE_WB_END_IE);
	DE_SCAL_EnableINT(scaler_idx,DE_WB_END_IE);
	DE_SCAL_Set_Reg_Rdy(scaler_idx);
	DE_SCAL_Writeback_Enable(scaler_idx);
	DE_SCAL_Start(scaler_idx);

	DE_INF("capture begin\n");

#ifndef __LINUX_OSAL__
	/* bare-metal: busy-wait on the write-back-end interrupt flag */
	while(!(DE_SCAL_QueryINT(scaler_idx) & DE_WB_END_IE) ) {
	}
#else
	{
		long timeout = (100 * HZ)/1000;//100ms

		init_waitqueue_head(&(gdisp.scaler[scaler_idx].scaler_queue));
		gdisp.scaler[scaler_idx].b_scaler_finished = 1;
		DE_SCAL_Writeback_Enable(scaler_idx);	/* NOTE(review): second enable; kept as in original */
		timeout = wait_event_interruptible_timeout(gdisp.scaler[scaler_idx].scaler_queue,
				gdisp.scaler[scaler_idx].b_scaler_finished == 2, timeout);
		gdisp.scaler[scaler_idx].b_scaler_finished = 0;
		if(timeout == 0) {
			__wrn("wait scaler %d finished timeout\n", scaler_idx);
			/* was leaked: reset/release the scaler and restore the
			 * display route before bailing out */
			DE_SCAL_Reset(scaler_idx);
			Scaler_Release(scaler_idx, FALSE);
			if(BSP_disp_get_output_type(sel) == DISP_OUTPUT_TYPE_NONE) {
				Image_close(sel);
				image_clk_off(sel);
			}
			DE_BE_Output_Select(sel, sel);
			return -1;
		}
	}
#endif

	DE_SCAL_Reset(scaler_idx);
	Scaler_Release(scaler_idx, FALSE);

	if(BSP_disp_get_output_type(sel) == DISP_OUTPUT_TYPE_NONE) {
		Image_close(sel);
		image_clk_off(sel);
	}
	DE_BE_Output_Select(sel, sel);	/* route the BE back to the display */

	return ret;
}
/*
 * BSP_disp_scaler_start_ex - run a one-shot memory-to-memory scale/convert
 * job on the scaler identified by @handle, with explicit output region and
 * frame-buffer stride (the "_ex" write-back address variant), including
 * optional 3D (stereo) source handling.
 *
 * @handle: scaler handle previously obtained from the scaler allocator;
 *          mapped to a scaler index via SCALER_HANDTOID().
 * @para:   scaler job parameters (input fb, source region, output fb,
 *          output region, color-space and 3D settings).
 *
 * Returns 0 on success, DIS_FAIL on invalid parameters, or -1 if the
 * write-back completion wait times out.
 */
__s32 BSP_disp_scaler_start_ex(__u32 handle,__disp_scaler_para_t *para)
{
	__scal_buf_addr_t in_addr;
	__scal_buf_addr_t out_addr;
	__scal_src_size_t in_size;
	__scal_out_size_t out_size;
	__scal_src_type_t in_type;
	__scal_out_type_t out_type;
	__scal_scan_mod_t in_scan;
	__scal_scan_mod_t out_scan;
	__u32 size = 0;
	__u32 sel = 0;
	__s32 ret = 0;

	if(para==NULL) {
		DE_WRN("input parameter can't be null!\n");
		return DIS_FAIL;
	}
	sel = SCALER_HANDTOID(handle);

	/* translate input framebuffer description to register values */
	in_type.mod= Scaler_sw_para_to_reg(1,para->input_fb.mode);
	in_type.fmt= Scaler_sw_para_to_reg(0,para->input_fb.format);
	in_type.ps= Scaler_sw_para_to_reg(2,(__u8)para->input_fb.seq);
	in_type.byte_seq = 0;
	in_type.sample_method = 0;

	/* validate and translate the requested output format */
	if(get_fb_type(para->output_fb.format) == DISP_FB_TYPE_YUV) {
		if(para->output_fb.mode == DISP_MOD_NON_MB_PLANAR) {
			out_type.fmt = Scaler_sw_para_to_reg(3, para->output_fb.format);
		} else {
			DE_WRN("output mode:%d invalid in Display_Scaler_Start\n",para->output_fb.mode);
			return DIS_FAIL;
		}
	} else {
		if(para->output_fb.mode == DISP_MOD_NON_MB_PLANAR && (para->output_fb.format == DISP_FORMAT_RGB888 || para->output_fb.format == DISP_FORMAT_ARGB8888)) {
			out_type.fmt = DE_SCAL_OUTPRGB888;
		} else if(para->output_fb.mode == DISP_MOD_INTERLEAVED && para->output_fb.format == DISP_FORMAT_ARGB8888) {
			out_type.fmt = DE_SCAL_OUTI0RGB888;
		} else {
			DE_WRN("output para invalid in Display_Scaler_Start,mode:%d,format:%d\n",para->output_fb.mode, para->output_fb.format);
			return DIS_FAIL;
		}
	}
	out_type.byte_seq = Scaler_sw_para_to_reg(2,para->output_fb.seq);

	/* output window inside the destination framebuffer */
	out_size.width = para->out_regn.width;
	out_size.height = para->out_regn.height;
	out_size.x_off = para->out_regn.x;
	out_size.y_off = para->out_regn.y;
	out_size.fb_width = para->output_fb.size.width;
	out_size.fb_height = para->output_fb.size.height;

	in_addr.ch0_addr = (__u32)OSAL_VAtoPA((void*)(para->input_fb.addr[0]));
	in_addr.ch1_addr = (__u32)OSAL_VAtoPA((void*)(para->input_fb.addr[1]));
	in_addr.ch2_addr = (__u32)OSAL_VAtoPA((void*)(para->input_fb.addr[2]));

	/* crop region inside the source framebuffer */
	in_size.src_width = para->input_fb.size.width;
	in_size.src_height = para->input_fb.size.height;
	in_size.x_off = para->source_regn.x;
	in_size.y_off = para->source_regn.y;
	in_size.scal_width= para->source_regn.width;
	in_size.scal_height= para->source_regn.height;

	in_scan.field = FALSE;
	in_scan.bottom = FALSE;
	/* when using the scaler as writeback, output is never interlaced
	 * for any display device */
	out_scan.field = FALSE;
	out_scan.bottom = FALSE;

	out_addr.ch0_addr = (__u32)OSAL_VAtoPA((void*)(para->output_fb.addr[0]));
	out_addr.ch1_addr = (__u32)OSAL_VAtoPA((void*)(para->output_fb.addr[1]));
	out_addr.ch2_addr = (__u32)OSAL_VAtoPA((void*)(para->output_fb.addr[2]));

	/* clean+flush the source so the HW sees CPU writes, flush the
	 * destination so the CPU sees HW writes afterwards */
	size = (para->input_fb.size.width * para->input_fb.size.height * de_format_to_bpp(para->input_fb.format) + 7)/8;
	OSAL_CacheRangeFlush((void *)para->input_fb.addr[0],size ,CACHE_CLEAN_FLUSH_D_CACHE_REGION);

	size = (para->output_fb.size.width * para->output_fb.size.height * de_format_to_bpp(para->output_fb.format) + 7)/8;
	OSAL_CacheRangeFlush((void *)para->output_fb.addr[0],size ,CACHE_FLUSH_D_CACHE_REGION);

	if(para->input_fb.b_trd_src) {
		/* stereo (3D) source: derive per-eye sizes and program the
		 * right-eye address set alongside the left one */
		__scal_3d_inmode_t inmode;
		__scal_3d_outmode_t outmode = 0;
		__scal_buf_addr_t scal_addr_right;

		inmode = Scaler_3d_sw_para_to_reg(0, para->input_fb.trd_mode, FALSE);
		outmode = Scaler_3d_sw_para_to_reg(1, para->output_fb.trd_mode, FALSE);

		DE_SCAL_Get_3D_In_Single_Size(inmode, &in_size, &in_size);
		if(para->output_fb.b_trd_src) {
			DE_SCAL_Get_3D_Out_Single_Size(outmode, &out_size, &out_size);
		}

		scal_addr_right.ch0_addr= (__u32)OSAL_VAtoPA((void*)(para->input_fb.trd_right_addr[0]));
		scal_addr_right.ch1_addr= (__u32)OSAL_VAtoPA((void*)(para->input_fb.trd_right_addr[1]));
		scal_addr_right.ch2_addr= (__u32)OSAL_VAtoPA((void*)(para->input_fb.trd_right_addr[2]));

		DE_SCAL_Set_3D_Ctrl(sel, para->output_fb.b_trd_src, inmode, outmode);
		DE_SCAL_Config_3D_Src(sel, &in_addr, &in_size, &in_type, inmode, &scal_addr_right);
	} else {
		DE_SCAL_Config_Src(sel,&in_addr,&in_size,&in_type,FALSE,FALSE);
	}

	/* program scaling, CSC and output, then start the write-back */
	DE_SCAL_Set_Scaling_Factor(sel, &in_scan, &in_size, &in_type, &out_scan, &out_size, &out_type);
	DE_SCAL_Set_Init_Phase(sel, &in_scan, &in_size, &in_type, &out_scan, &out_size, &out_type, FALSE);
	DE_SCAL_Set_CSC_Coef(sel, para->input_fb.cs_mode, para->output_fb.cs_mode, get_fb_type(para->input_fb.format), get_fb_type(para->output_fb.format), para->input_fb.br_swap, para->output_fb.br_swap);
	DE_SCAL_Set_Scaling_Coef(sel, &in_scan, &in_size, &in_type, &out_scan, &out_size, &out_type, DISP_VIDEO_NATUAL);
	DE_SCAL_Set_Out_Format(sel, &out_type);
	DE_SCAL_Set_Out_Size(sel, &out_scan,&out_type, &out_size);
	//DE_SCAL_Set_Writeback_Addr(sel,&out_addr);
	DE_SCAL_Set_Writeback_Addr_ex(sel,&out_addr,&out_size,&out_type);
	DE_SCAL_Writeback_Linestride_Enable(sel);
	DE_SCAL_Output_Select(sel, 3);
	//BSP_disp_print_reg(1, sel);
	DE_SCAL_EnableINT(sel,DE_WB_END_IE);
	DE_SCAL_Start(sel);
	DE_SCAL_Set_Reg_Rdy(sel);
#ifndef __LINUX_OSAL__
	/* bare-metal build: busy-poll the write-back-end interrupt flag */
	DE_SCAL_Writeback_Enable(sel);
	while(!(DE_SCAL_QueryINT(sel) & DE_WB_END_IE) ) {
	}
#else
	/* Linux build: sleep until the scaler ISR signals completion */
	{
		long timeout = (100 * HZ)/1000; /* 100 ms */

		init_waitqueue_head(&(gdisp.scaler[sel].scaler_queue));
		gdisp.scaler[sel].b_scaler_finished = 1;
		DE_SCAL_Writeback_Enable(sel);
		timeout = wait_event_interruptible_timeout(gdisp.scaler[sel].scaler_queue, gdisp.scaler[sel].b_scaler_finished == 2, timeout);
		gdisp.scaler[sel].b_scaler_finished = 0;
		if(timeout == 0) {
			__wrn("wait scaler %d finished timeout\n", sel);
			/* NOTE(review): writeback/linestride are left enabled
			 * on this path — confirm intended */
			return -1;
		}
	}
#endif
	DE_SCAL_Reset(sel);
	DE_SCAL_Writeback_Disable(sel);
	DE_SCAL_Writeback_Linestride_Disable(sel);

	return ret;
}
/*
 * _connect - (re)establish the client's network connection to the remote
 * host and kick off the protocol handshake.
 *
 * On first call, parses @str into output->path and output->host (the part
 * after '@'); later calls reuse the stored values. Creates the socket,
 * (re)starts the receiver thread, sends CMD_CONNECT, then requests remote
 * info. On any failure after socket creation the socket is killed and a
 * negative errno is returned so the caller can retry.
 */
static int _connect(struct client_output *output, const char *str)
{
	struct sockaddr_storage sockaddr = {};
	int status;

	/* one-time parse of "path@host" from the brick name */
	if (unlikely(!output->path)) {
		output->path = brick_strdup(str);
		status = -ENOMEM;
		if (!output->path) {
			MARS_DBG("no mem\n");
			goto done;
		}
		status = -EINVAL;
		output->host = strchr(output->path, '@');
		if (!output->host) {
			brick_string_free(output->path);
			output->path = NULL;
			MARS_ERR("parameter string '%s' contains no remote specifier with '@'-syntax\n", str);
			goto done;
		}
		/* split the string in place: path '\0' host */
		*output->host++ = '\0';
	}

	if (unlikely(output->receiver.thread)) {
		MARS_WRN("receiver thread unexpectedly not dead\n");
		_kill_thread(&output->receiver, "receiver");
	}

	status = mars_create_sockaddr(&sockaddr, output->host);
	if (unlikely(status < 0)) {
		MARS_DBG("no sockaddr, status = %d\n", status);
		goto done;
	}

	status = mars_create_socket(&output->socket, &sockaddr, false);
	if (unlikely(status < 0)) {
		MARS_DBG("no socket, status = %d\n", status);
		/* no socket exists yet, so skip the _kill_socket() cleanup */
		goto really_done;
	}
	output->socket.s_shutdown_on_err = true;
	output->socket.s_send_abort = mars_client_abort;
	output->socket.s_recv_abort = mars_client_abort;

	output->receiver.thread = brick_thread_create(receiver_thread, output, "mars_receiver%d", thread_count++);
	if (unlikely(!output->receiver.thread)) {
		MARS_ERR("cannot start receiver thread, status = %d\n", status);
		status = -ENOENT;
		goto done;
	}

	/* protocol handshake: announce which resource we want */
	{
		struct mars_cmd cmd = {
			.cmd_code = CMD_CONNECT,
			.cmd_str1 = output->path,
		};

		status = mars_send_struct(&output->socket, &cmd, mars_cmd_meta);
		if (unlikely(status < 0)) {
			MARS_DBG("send of connect failed, status = %d\n", status);
			goto done;
		}
	}
	if (status >= 0) {
		status = _request_info(output);
	}

done:
	if (status < 0) {
		MARS_INF("cannot connect to remote host '%s' (status = %d) -- retrying\n", output->host ? output->host : "NULL", status);
		_kill_socket(output);
	}
really_done:
	return status;
}

////////////////// own brick / input / output operations //////////////////

/*
 * client_get_info - synchronously fetch remote target info.
 *
 * Flags the sender thread via get_info, wakes it, and waits up to 60 s for
 * the receiver to set got_info. Copies the cached info into @info on
 * success; returns 0 on success, -ETIME otherwise (also when @info is NULL,
 * even if the info arrived — presumably callers always pass non-NULL).
 */
static int client_get_info(struct client_output *output, struct mars_info *info)
{
	int status;

	output->got_info = false;
	output->get_info = true;
	wake_up_interruptible(&output->event);

	wait_event_interruptible_timeout(output->info_event, output->got_info, 60 * HZ);
	status = -ETIME;
	if (output->got_info && info) {
		memcpy(info, &output->info, sizeof(*info));
		status = 0;
	}

//done:
	return status;
}
/*
 * sender_thread - per-output kernel thread that maintains the connection
 * and ships queued mref requests to the remote host.
 *
 * Main loop: expire timed-out requests, (re)connect if the socket died,
 * service info requests, then move one mref from mref_list to wait_list
 * and transmit it. On send failure the request is re-hashed for resubmit
 * and the socket is killed so the next iteration reconnects. On thread
 * stop, all pending requests are failed with a forced timeout.
 */
static int sender_thread(void *data)
{
	struct client_output *output = data;
	struct client_brick *brick = output->brick;
	unsigned long flags;
	bool do_kill = false;	/* true once a live socket exists */
	int status = 0;

	output->receiver.restart_count = 0;

	while (!brick_thread_should_stop()) {
		struct list_head *tmp = NULL;
		struct client_mref_aspect *mref_a;
		struct mref_object *mref;

		/* expire requests that exceeded the configured IO timeout */
		if (brick->power.io_timeout > 0) {
			_do_timeout(output, &output->wait_list, false);
			_do_timeout(output, &output->mref_list, false);
		}

		if (unlikely(output->recv_error != 0 || !mars_socket_is_alive(&output->socket))) {
			MARS_DBG("recv_error = %d do_kill = %d\n", output->recv_error, do_kill);
			if (do_kill) {
				do_kill = false;
				_kill_socket(output);
				brick_msleep(3000);
			}
			status = _connect(output, brick->brick_name);
			MARS_IO("connect status = %d\n", status);
			if (unlikely(status < 0)) {
				/* back off before retrying the connect */
				brick_msleep(3000);
				_do_timeout(output, &output->wait_list, false);
				_do_timeout(output, &output->mref_list, false);
				continue;
			}
			brick->connection_state = 2;
			do_kill = true;
			/* Re-Submit any waiting requests */
			MARS_IO("re-submit\n");
			_do_resubmit(output);
		}

		/* sleep until there is work, an error, or stop is requested */
		wait_event_interruptible_timeout(output->event, !list_empty(&output->mref_list) || output->get_info || output->recv_error != 0 || brick_thread_should_stop(), 1 * HZ);

		if (unlikely(output->recv_error != 0)) {
			MARS_DBG("recv_error = %d\n", output->recv_error);
			brick_msleep(1000);
			continue;
		}

		if (output->get_info) {
			status = _request_info(output);
			if (status >= 0) {
				output->get_info = false;
			} else {
				MARS_WRN("cannot get info, status = %d\n", status);
				brick_msleep(1000);
			}
		}

		/* Grab the next mref from the queue */
		traced_lock(&output->lock, flags);
		if (list_empty(&output->mref_list)) {
			traced_unlock(&output->lock, flags);
			continue;
		}
		tmp = output->mref_list.next;
		list_del(tmp);
		/* park it on wait_list until the answer arrives */
		list_add(tmp, &output->wait_list);
		mref_a = container_of(tmp, struct client_mref_aspect, io_head);
		traced_unlock(&output->lock, flags);

		mref = mref_a->object;

		/* optional bandwidth throttling, billed in KiB */
		if (brick->limit_mode) {
			int amount = 0;
			if (mref->ref_cs_mode < 2)
				amount = (mref->ref_len - 1) / 1024 + 1;
			mars_limit_sleep(&client_limiter, amount);
		}

		MARS_IO("sending mref, id = %d pos = %lld len = %d rw = %d\n", mref->ref_id, mref->ref_pos, mref->ref_len, mref->ref_rw);

		status = mars_send_mref(&output->socket, mref);
		MARS_IO("status = %d\n", status);
		if (unlikely(status < 0)) {
			// retry submission on next occasion..
			MARS_WRN("sending failed, status = %d\n", status);
			if (do_kill) {
				do_kill = false;
				_kill_socket(output);
			}
			_hash_insert(output, mref_a);
			brick_msleep(1000);
			continue;
		}
	}
//done:
	if (status < 0) {
		MARS_WRN("sender thread terminated with status = %d\n", status);
	}

	if (do_kill) {
		_kill_socket(output);
	}

	/* Signal error on all pending IO requests.
	 * We have no other chance (except probably delaying
	 * this until destruction which is probably not what
	 * we want).
	 */
	_do_timeout(output, &output->wait_list, true);
	_do_timeout(output, &output->mref_list, true);

	wake_up_interruptible(&output->sender.run_event);
	MARS_DBG("sender terminated\n");
	return status;
}
/*
 * kgsl_g12_cmdstream_issueibcmds - submit a single indirect buffer (IB) to
 * the Z180/G12 2D core's software ring buffer.
 *
 * @dev_priv:  per-fd device private data (supplies device and pagetable).
 * @context:   submitting context; a context switch is emitted when it
 *             differs from the previous one or KGSL_CONTEXT_CTX_SWITCH set.
 * @ibdesc:    IB descriptor array; exactly one entry is supported.
 * @numibs:    number of IBs; must be 1.
 * @timestamp: in: user hostptr base for the IB tail patch-up;
 *             out: the timestamp assigned to this submission.
 * @ctrl:      submission flags.
 *
 * Returns 0 on success or a negative errno.
 *
 * Fixes vs. previous revision:
 *  - `result` was `unsigned int`, so the `result < 0` check after
 *    wait_event_interruptible_timeout() could never fire and interrupted
 *    waits were silently treated as success; it is now signed.
 *  - the HUNG check did `return -EINVAL;` followed by an unreachable
 *    `goto error;`; collapsed to the single exit path.
 */
int
kgsl_g12_cmdstream_issueibcmds(struct kgsl_device_private *dev_priv,
			struct kgsl_context *context,
			struct kgsl_ibdesc *ibdesc,
			unsigned int numibs,
			uint32_t *timestamp,
			unsigned int ctrl)
{
	int result = 0;	/* signed: holds negative errnos and wait results */
	unsigned int ofs = PACKETSIZE_STATESTREAM * sizeof(unsigned int);
	unsigned int cnt = 5;
	unsigned int nextaddr = 0;
	unsigned int index = 0;
	unsigned int nextindex;
	unsigned int nextcnt = KGSL_G12_STREAM_END_CMD | 5;
	struct kgsl_memdesc tmp = {0};
	unsigned int cmd;
	struct kgsl_device *device = dev_priv->device;
	struct kgsl_pagetable *pagetable = dev_priv->process_priv->pagetable;
	struct kgsl_g12_device *g12_device = KGSL_G12_DEVICE(device);
	unsigned int sizedwords;

	if (device->state & KGSL_STATE_HUNG) {
		result = -EINVAL;
		goto error;
	}
	if (numibs != 1) {
		KGSL_DRV_ERR(device, "Invalid number of ibs: %d\n", numibs);
		result = -EINVAL;
		goto error;
	}
	cmd = ibdesc[0].gpuaddr;
	sizedwords = ibdesc[0].sizedwords;

	/* hostptr carries the user-space base for the tail patch below */
	tmp.hostptr = (void *)*timestamp;

	KGSL_CMD_INFO(device, "ctxt %d ibaddr 0x%08x sizedwords %d\n",
		context->id, cmd, sizedwords);

	/* context switch */
	if ((context->id != (int)g12_device->ringbuffer.prevctx) ||
	    (ctrl & KGSL_CONTEXT_CTX_SWITCH)) {
		KGSL_CMD_INFO(device, "context switch %d -> %d\n",
			context->id, g12_device->ringbuffer.prevctx);
		kgsl_mmu_setstate(device, pagetable);
		/* full state stream instead of the 5-dword fast path */
		cnt = PACKETSIZE_STATESTREAM;
		ofs = 0;
	}
	kgsl_g12_setstate(device, kgsl_pt_get_flags(device->mmu.hwpagetable,
						    device->id));

	/* block until the ring buffer has room (or signal/timeout) */
	result = wait_event_interruptible_timeout(device->wait_queue,
				  room_in_rb(g12_device),
				  msecs_to_jiffies(KGSL_TIMEOUT_DEFAULT));
	if (result < 0) {
		KGSL_CMD_ERR(device, "wait_event_interruptible_timeout "
			"failed: %d\n", result);
		goto error;
	}
	result = 0;

	index = g12_device->current_timestamp % KGSL_G12_PACKET_COUNT;
	g12_device->current_timestamp++;
	nextindex = g12_device->current_timestamp % KGSL_G12_PACKET_COUNT;
	*timestamp = g12_device->current_timestamp;

	g12_device->ringbuffer.prevctx = context->id;

	addcmd(&g12_device->ringbuffer, index, cmd + ofs, cnt);

	/* Make sure the next ringbuffer entry has a marker */
	addmarker(&g12_device->ringbuffer, nextindex);

	nextaddr = g12_device->ringbuffer.cmdbufdesc.gpuaddr
		+ rb_offset(nextindex);

	/* patch the IB tail to chain into the next ring entry */
	tmp.hostptr = (void *)(tmp.hostptr +
			(sizedwords * sizeof(unsigned int)));
	tmp.size = 12;

	kgsl_sharedmem_writel(&tmp, 4, nextaddr);
	kgsl_sharedmem_writel(&tmp, 8, nextcnt);

	/* sync memory before activating the hardware for the new command */
	mb();

	cmd = (int)(((2) & VGV3_CONTROL_MARKADD_FMASK)
		<< VGV3_CONTROL_MARKADD_FSHIFT);

	/* pulse MARKADD to notify the core of the new work */
	kgsl_g12_cmdwindow_write(device, KGSL_CMDWINDOW_2D,
				 ADDR_VGV3_CONTROL, cmd);
	kgsl_g12_cmdwindow_write(device, KGSL_CMDWINDOW_2D,
				 ADDR_VGV3_CONTROL, 0);
error:
	return result;
}
static void wait_RDS(void) { wait_event_interruptible_timeout(Si47xx_waitq, (Si47xx_dev_wait_flag == WAIT_OVER), Si47xx_dev->settings.timeout_RDS); }
/* Fill the buffer. Called with dev->lock held */ static int _chaoskey_fill(struct chaoskey *dev) { DEFINE_WAIT(wait); int result; bool started; usb_dbg(dev->interface, "fill"); /* Return immediately if someone called before the buffer was * empty */ if (dev->valid != dev->used) { usb_dbg(dev->interface, "not empty yet (valid %d used %d)", dev->valid, dev->used); return 0; } /* Bail if the device has been removed */ if (!dev->present) { usb_dbg(dev->interface, "device not present"); return -ENODEV; } /* Make sure the device is awake */ result = usb_autopm_get_interface(dev->interface); if (result) { usb_dbg(dev->interface, "wakeup failed (result %d)", result); return result; } dev->reading = true; result = usb_submit_urb(dev->urb, GFP_KERNEL); if (result < 0) { result = usb_translate_errors(result); dev->reading = false; goto out; } /* The first read on the Alea takes a little under 2 seconds. * Reads after the first read take only a few microseconds * though. Presumably the entropy-generating circuit needs * time to ramp up. So, we wait longer on the first read. */ started = dev->reads_started; dev->reads_started = true; result = wait_event_interruptible_timeout( dev->wait_q, !dev->reading, (started ? NAK_TIMEOUT : ALEA_FIRST_TIMEOUT) ); if (result < 0) goto out; if (result == 0) result = -ETIMEDOUT; else result = dev->valid; out: /* Let the device go back to sleep eventually */ usb_autopm_put_interface(dev->interface); usb_dbg(dev->interface, "read %d bytes", dev->valid); return result; }
static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum, __u8 *buf, size_t count, unsigned char rtype) { struct uhid_device *uhid = hid->driver_data; __u8 report_type; struct uhid_event *ev; unsigned long flags; int ret; size_t uninitialized_var(len); struct uhid_feature_answer_req *req; if (!uhid->running) return -EIO; switch (rtype) { case HID_FEATURE_REPORT: report_type = UHID_FEATURE_REPORT; break; case HID_OUTPUT_REPORT: report_type = UHID_OUTPUT_REPORT; break; case HID_INPUT_REPORT: report_type = UHID_INPUT_REPORT; break; default: return -EINVAL; } ret = mutex_lock_interruptible(&uhid->report_lock); if (ret) return ret; ev = kzalloc(sizeof(*ev), GFP_KERNEL); if (!ev) { ret = -ENOMEM; goto unlock; } spin_lock_irqsave(&uhid->qlock, flags); ev->type = UHID_FEATURE; ev->u.feature.id = atomic_inc_return(&uhid->report_id); ev->u.feature.rnum = rnum; ev->u.feature.rtype = report_type; atomic_set(&uhid->report_done, 0); uhid_queue(uhid, ev); spin_unlock_irqrestore(&uhid->qlock, flags); ret = wait_event_interruptible_timeout(uhid->report_wait, atomic_read(&uhid->report_done), 5 * HZ); /* * Make sure "uhid->running" is cleared on shutdown before * "uhid->report_done" is set. */ smp_rmb(); if (!ret || !uhid->running) { ret = -EIO; } else if (ret < 0) { ret = -ERESTARTSYS; } else { spin_lock_irqsave(&uhid->qlock, flags); req = &uhid->report_buf.u.feature_answer; if (req->err) { ret = -EIO; } else { ret = 0; len = min(count, min_t(size_t, req->size, UHID_DATA_MAX)); memcpy(buf, req->data, len); } spin_unlock_irqrestore(&uhid->qlock, flags); } atomic_set(&uhid->report_done, 1); unlock: mutex_unlock(&uhid->report_lock); return ret ? ret : len; }
/**
 * Main entrypoint for syncpoint value waits.
 *
 * Waits until syncpoint @id reaches @thresh, up to @timeout (jiffy-based
 * units fed to wait_event_interruptible_timeout; NVHOST_NO_TIMEOUT waits
 * forever). Fast paths: the cached value, then a fresh register read.
 * Otherwise an interrupt-driven waiter is registered and the thread sleeps
 * in SYNCPT_CHECK_PERIOD slices, dumping debug state if the syncpoint
 * appears stuck. On success *@value (if non-NULL) receives the observed
 * syncpoint value. Returns 0 on success, -EAGAIN on timeout (or zero
 * timeout), -ENOMEM, or a negative signal/wait error.
 */
int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id, u32 thresh, u32 timeout, u32 *value)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	void *ref;
	void *waiter;
	int err = 0, check_count = 0, low_timeout = 0;
	u32 val;

	if (value)
		*value = 0;

	/* first check cache */
	if (nvhost_syncpt_is_expired(sp, id, thresh)) {
		if (value)
			*value = nvhost_syncpt_read_min(sp, id);
		return 0;
	}

	/* keep host alive */
	nvhost_module_busy(syncpt_to_dev(sp)->dev);

	/* try to read from register */
	val = syncpt_op().update_min(sp, id);
	if (nvhost_syncpt_is_expired(sp, id, thresh)) {
		if (value)
			*value = val;
		goto done;
	}

	/* zero timeout means "poll once, don't sleep" */
	if (!timeout) {
		err = -EAGAIN;
		goto done;
	}

	/* schedule a wakeup when the syncpoint value is reached */
	waiter = nvhost_intr_alloc_waiter();
	if (!waiter) {
		err = -ENOMEM;
		goto done;
	}

	err = nvhost_intr_add_action(&(syncpt_to_dev(sp)->intr), id, thresh,
				NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE, &wq,
				waiter,
				&ref);
	if (err)
		goto done;

	err = -EAGAIN;
	/* Caller-specified timeout may be impractically low */
	if (timeout < SYNCPT_CHECK_PERIOD)
		low_timeout = timeout;

	/* wait for the syncpoint, or timeout, or signal */
	while (timeout) {
		/* sleep in bounded slices so we can report stuck waits */
		u32 check = min_t(u32, SYNCPT_CHECK_PERIOD, timeout);
		int remain = wait_event_interruptible_timeout(wq,
				syncpt_update_min_is_expired(sp, id, thresh),
				check);
		if (remain > 0 || nvhost_syncpt_is_expired(sp, id, thresh)) {
			if (value)
				*value = nvhost_syncpt_read_min(sp, id);
			err = 0;
			break;
		}
		if (remain < 0) {
			/* interrupted by a signal */
			err = remain;
			break;
		}
		if (timeout != NVHOST_NO_TIMEOUT)
			timeout -= check;
		if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
			dev_warn(&syncpt_to_dev(sp)->dev->dev,
				"%s: syncpoint id %d (%s) stuck waiting %d, timeout=%d\n",
				 current->comm, id, syncpt_op().name(sp, id),
				 thresh, timeout);
			syncpt_op().debug(sp);
			/* after enough slices, dump full debug state once */
			if (check_count == MAX_STUCK_CHECK_COUNT) {
				if (low_timeout) {
					dev_warn(&syncpt_to_dev(sp)->dev->dev,
						"is timeout %d too low?\n",
						low_timeout);
				}
				nvhost_debug_dump(syncpt_to_dev(sp));
			}
			check_count++;
		}
	}
	nvhost_intr_put_ref(&(syncpt_to_dev(sp)->intr), id, ref);

done:
	nvhost_module_idle(syncpt_to_dev(sp)->dev);
	return err;
}
/* ------------------- device --------------------- */ static long audamrnb_in_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct audio_in *audio = file->private_data; int rc = 0; if (cmd == AUDIO_GET_STATS) { struct msm_audio_stats stats; stats.byte_count = atomic_read(&audio->in_bytes); stats.sample_count = atomic_read(&audio->in_samples); if (copy_to_user((void *) arg, &stats, sizeof(stats))) return -EFAULT; return rc; } mutex_lock(&audio->lock); switch (cmd) { case AUDIO_START: { uint32_t freq; freq = 48000; MM_DBG("AUDIO_START\n"); if (audio->in_call && (audio->voice_state != VOICE_STATE_INCALL)) { rc = -EPERM; break; } rc = msm_snddev_request_freq(&freq, audio->enc_id, SNDDEV_CAP_TX, AUDDEV_CLNT_ENC); MM_DBG("sample rate configured %d\n", freq); if (rc < 0) { MM_DBG(" Sample rate can not be set, return code %d\n", rc); msm_snddev_withdraw_freq(audio->enc_id, SNDDEV_CAP_TX, AUDDEV_CLNT_ENC); MM_DBG("msm_snddev_withdraw_freq\n"); break; } /*update aurec session info in audpreproc layer*/ audio->session_info.session_id = audio->enc_id; /*amrnb works only on 8KHz*/ audio->session_info.sampling_freq = 8000; audpreproc_update_audrec_info(&audio->session_info); rc = audamrnb_in_enable(audio); if (!rc) { rc = wait_event_interruptible_timeout(audio->wait_enable, audio->running != 0, 1*HZ); MM_DBG("state %d rc = %d\n", audio->running, rc); if (audio->running == 0) rc = -ENODEV; else rc = 0; } audio->stopped = 0; break; } case AUDIO_STOP: { /*reset the sampling frequency information at audpreproc layer*/ audio->session_info.sampling_freq = 0; audpreproc_update_audrec_info(&audio->session_info); rc = audamrnb_in_disable(audio); rc = msm_snddev_withdraw_freq(audio->enc_id, SNDDEV_CAP_TX, AUDDEV_CLNT_ENC); MM_DBG("msm_snddev_withdraw_freq\n"); audio->stopped = 1; break; } case AUDIO_FLUSH: { if (audio->stopped) { /* Make sure we're stopped and we wake any threads * that might be blocked holding the read_lock. 
* While audio->stopped read threads will always * exit immediately. */ wake_up(&audio->wait); mutex_lock(&audio->read_lock); audamrnb_in_flush(audio); mutex_unlock(&audio->read_lock); } break; } case AUDIO_SET_STREAM_CONFIG: { struct msm_audio_stream_config cfg; if (copy_from_user(&cfg, (void *) arg, sizeof(cfg))) { rc = -EFAULT; break; } /* Allow only single frame */ if (cfg.buffer_size != (FRAME_SIZE - 8)) rc = -EINVAL; else audio->buffer_size = cfg.buffer_size; break; } case AUDIO_GET_STREAM_CONFIG: { struct msm_audio_stream_config cfg; memset(&cfg, 0, sizeof(cfg)); cfg.buffer_size = audio->buffer_size; cfg.buffer_count = FRAME_NUM; if (copy_to_user((void *) arg, &cfg, sizeof(cfg))) rc = -EFAULT; break; } case AUDIO_GET_AMRNB_ENC_CONFIG_V2: { struct msm_audio_amrnb_enc_config_v2 cfg; memset(&cfg, 0, sizeof(cfg)); cfg.dtx_enable = ((audio->dtx_mode == -1) ? 1 : 0); cfg.band_mode = audio->used_mode; cfg.frame_format = audio->frame_format; if (copy_to_user((void *) arg, &cfg, sizeof(cfg))) rc = -EFAULT; break; } case AUDIO_SET_AMRNB_ENC_CONFIG_V2: { struct msm_audio_amrnb_enc_config_v2 cfg; if (copy_from_user(&cfg, (void *) arg, sizeof(cfg))) { rc = -EFAULT; break; } /* DSP does not support any other than default format */ if (audio->frame_format != cfg.frame_format) { rc = -EINVAL; break; } if (cfg.dtx_enable == 0) audio->dtx_mode = 0; else if (cfg.dtx_enable == 1) audio->dtx_mode = -1; else { rc = -EINVAL; break; } audio->used_mode = cfg.band_mode; break; } case AUDIO_SET_INCALL: { struct msm_voicerec_mode cfg; unsigned long flags; if (copy_from_user(&cfg, (void *) arg, sizeof(cfg))) { rc = -EFAULT; break; } if (cfg.rec_mode != VOC_REC_BOTH && cfg.rec_mode != VOC_REC_UPLINK && cfg.rec_mode != VOC_REC_DOWNLINK) { MM_ERR("invalid rec_mode\n"); rc = -EINVAL; break; } else { spin_lock_irqsave(&audio->dev_lock, flags); if (cfg.rec_mode == VOC_REC_UPLINK) audio->source = VOICE_UL_SOURCE_MIX_MASK; else if (cfg.rec_mode == VOC_REC_DOWNLINK) audio->source = 
VOICE_DL_SOURCE_MIX_MASK; else audio->source = VOICE_DL_SOURCE_MIX_MASK | VOICE_UL_SOURCE_MIX_MASK ; audio->in_call = 1; spin_unlock_irqrestore(&audio->dev_lock, flags); } break; } case AUDIO_GET_SESSION_ID: { if (copy_to_user((void *) arg, &audio->enc_id, sizeof(unsigned short))) { rc = -EFAULT; } break; } default: rc = -EINVAL; } mutex_unlock(&audio->lock); return rc; }
static int touch_event_handler(void *unused) { struct sched_param param = { .sched_priority = RTPM_PRIO_TPD }; struct touch_info cinfo, sinfo; int pending = 0, down = 0; struct touch_info buf[3]; int buf_p=1, buf_c=2, buf_f=0; int dx; cinfo.pending=0; sched_setscheduler(current, SCHED_RR, ¶m); do { MT6516_EINTIRQUnmask(CUST_EINT_TOUCH_PANEL_NUM); // possibly to lose event? set_current_state(TASK_INTERRUPTIBLE); if (!kthread_should_stop()) { TPD_DEBUG_CHECK_NO_RESPONSE; do { if(pending) wait_event_interruptible_timeout(waiter, tpd_flag!=0, HZ/10); else wait_event_interruptible_timeout(waiter,tpd_flag!=0, HZ*2); } while(0); if(tpd_flag==0 && !pending) continue; // if timeout for no touch, then re-wait. if(tpd_flag!=0 && pending>0) pending=0; tpd_flag=0; TPD_DEBUG_SET_TIME; } set_current_state(TASK_RUNNING); if(!pending) if(tpd_gettouchinfo(&cinfo, &sinfo)) continue; if(pending>1) { pending--; continue; } if(cinfo.count==-1) continue; if(tpd_mode==TPD_MODE_KEYPAD && ((tpd_mode_axis==0 && cinfo.y1>=tpd_mode_min && cinfo.y1<=tpd_mode_max) || (tpd_mode_axis==1 && cinfo.x1>=tpd_mode_min && cinfo.x1<=tpd_mode_max))) { buf_f = ((buf_f+1)%3); buf_c = ((buf_f+2)%3); buf_p = ((buf_f+1)%3); buf[buf_f].x1 = cinfo.x1; buf[buf_f].y1 = cinfo.y1; dx = cinfo.x1 - buf[buf_c].x1; buf[buf_f].count = (cinfo.count?(dx*dx<tpd_mode_keypad_tolerance?buf[buf_c].count+1:1):0); if(buf[buf_c].count<2) if(tpd_up(raw_x1, raw_y1, buf[buf_p].x1, buf[buf_p].y1,&down)) input_sync(tpd->dev); if(buf[buf_c].count>1 || (buf[buf_c].count==1 && ( buf[buf_p].count==0 || buf[buf_f].count==0 || (buf[buf_f].x1-buf[buf_c].x1)*(buf[buf_c].x1-buf[buf_p].x1)<=0))) { tpd_down(raw_x1, raw_y1, buf[buf_c].x1, buf[buf_c].y1, 1); input_sync(tpd->dev); down=1; } if(cinfo.count==0) if(tpd_up(raw_x1, raw_y1, buf[buf_p].x1, buf[buf_p].y1,&down)) input_sync(tpd->dev); } else { switch(cinfo.count) { case 0: if(cinfo.pending>0) pending+=cinfo.pending, cinfo.pending=0; else { if(sinfo.count>=2) { if(pending==0) pending+=1; else 
{ if(tpd_up(raw_x1, raw_y1, sinfo.x1, sinfo.y1, &down) + tpd_up(raw_x2, raw_y2, sinfo.x2,sinfo.y2, &down)) input_sync(tpd->dev); sinfo.count = 0; pending = 0; } } else if(sinfo.count==1) { #ifdef TPD_HAVE_BUTTON if(boot_mode!=NORMAL_BOOT && tpd->btn_state) tpd_button(cinfo.x1, cinfo.y1,0); #endif if(tpd_up(raw_x1, raw_y1, cinfo.x1,cinfo.y1, &down)) input_sync(tpd->dev); sinfo.count = 0; pending=0; } else pending = 0; } TPD_DEBUG_PRINT_UP; break; case 1: if(sinfo.count>=3 || pending==1) { pending = 0; if(sinfo.count==3 && down>1) { if(tpd_up(raw_x1, raw_y1, sinfo.x1,sinfo.y1, &down)) input_sync(tpd->dev); /*tpd_down(cinfo.x1, cinfo.y1, 1); if( (cinfo.x1-sinfo.x1)*(cinfo.x1-sinfo.x1)+(cinfo.y1-sinfo.y1)*(cinfo.y1-sinfo.y1) > (cinfo.x1-sinfo.x2)*(cinfo.x1-sinfo.x2)+(cinfo.y1-sinfo.y2)*(cinfo.y1-sinfo.y2) ) { if(tpd_up(sinfo.x1,sinfo.y1, &down)) input_sync(tpd->dev); } else { if(tpd_up(sinfo.x2,sinfo.y2, &down)) input_sync(tpd->dev); }*/ } } else if(sinfo.count==2) { if(pending==0) pending=1; else { if(tpd_up(raw_x1, raw_y1, cinfo.x1,cinfo.y1, &down) + tpd_up(raw_x2, raw_y2, sinfo.x2,sinfo.y2, &down)) input_sync(tpd->dev); sinfo.x1 = cinfo.x1; sinfo.y1=cinfo.y1; } sinfo.count = 3; } else { #ifdef TPD_HAVE_BUTTON if(boot_mode!=NORMAL_BOOT && cinfo.y1>=TPD_RES_Y) { if(tpd_up(raw_x1, raw_y1, cinfo.x1, cinfo.y1, &down)) input_sync(tpd->dev); tpd_button(cinfo.x1, cinfo.y1, 1); sinfo.count = 1; } else #endif do { #ifdef TPD_HAVE_BUTTON if(boot_mode!=NORMAL_BOOT && tpd->btn_state) tpd_button(cinfo.x1,cinfo.y1,0); #endif tpd_down(raw_x1, raw_y1, cinfo.x1,cinfo.y1, cinfo.p); input_sync(tpd->dev); down = 1; sinfo.count = 1; } while(0); } TPD_DEBUG_PRINT_DOWN; break; case 2: // hold one finger, press another, this code will release both fingers if(sinfo.count==3) { if(tpd_up(raw_x1, raw_y1, sinfo.x1, sinfo.y1, &down) + tpd_up(raw_x2, raw_y2, sinfo.x2, sinfo.y2, &down)) input_sync(tpd->dev); } tpd_smoothing(&cinfo, &sinfo); tpd_down(raw_x1, raw_y1, sinfo.x1, sinfo.y1, 1); 
tpd_down(raw_x2, raw_y2, sinfo.x2, sinfo.y2, 1); down = 2; sinfo.count = 2; input_sync(tpd->dev); TPD_DEBUG_PRINT_DOWN; break; default: break; } } } while (!kthread_should_stop()); return 0; }
/*
 * ath6kl_get_station - cfg80211 get_station handler: report statistics for
 * the currently associated BSS.
 *
 * Only the associated peer (@mac must equal ar->bssid) is supported.
 * Triggers a firmware stats refresh via WMI, waits up to WMI_TIMEOUT for
 * the update to land, then fills @sinfo (byte/packet counters, RSSI, TX
 * bitrate, and BSS parameters when associated in infrastructure mode).
 * Returns 0 on success, -ENOENT for a foreign MAC, -EBUSY/-EIO/-ETIMEDOUT
 * or a signal error on failure.
 */
static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
			      u8 *mac, struct station_info *sinfo)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	long left;
	bool sgi;
	s32 rate;
	int ret;
	u8 mcs;

	/* stats are only tracked for the BSS we are associated with */
	if (memcmp(mac, ar->bssid, ETH_ALEN) != 0)
		return -ENOENT;

	if (down_interruptible(&ar->sem))
		return -EBUSY;

	set_bit(STATS_UPDATE_PEND, &ar->flag);

	ret = ath6kl_wmi_get_stats_cmd(ar->wmi);

	if (ret != 0) {
		up(&ar->sem);
		return -EIO;
	}

	/* wait for the stats event to clear STATS_UPDATE_PEND */
	left = wait_event_interruptible_timeout(ar->event_wq,
						!test_bit(STATS_UPDATE_PEND,
							  &ar->flag),
						WMI_TIMEOUT);

	up(&ar->sem);

	if (left == 0)
		return -ETIMEDOUT;
	else if (left < 0)
		return left;

	if (ar->target_stats.rx_byte) {
		sinfo->rx_bytes = ar->target_stats.rx_byte;
		sinfo->filled |= STATION_INFO_RX_BYTES;
		sinfo->rx_packets = ar->target_stats.rx_pkt;
		sinfo->filled |= STATION_INFO_RX_PACKETS;
	}

	if (ar->target_stats.tx_byte) {
		sinfo->tx_bytes = ar->target_stats.tx_byte;
		sinfo->filled |= STATION_INFO_TX_BYTES;
		sinfo->tx_packets = ar->target_stats.tx_pkt;
		sinfo->filled |= STATION_INFO_TX_PACKETS;
	}

	sinfo->signal = ar->target_stats.cs_rssi;
	sinfo->filled |= STATION_INFO_SIGNAL;

	rate = ar->target_stats.tx_ucast_rate;

	/* decode the firmware rate code: legacy, HT20 or HT40 (+/- SGI) */
	if (is_rate_legacy(rate)) {
		sinfo->txrate.legacy = rate / 100;
	} else if (is_rate_ht20(rate, &mcs, &sgi)) {
		if (sgi) {
			sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
			sinfo->txrate.mcs = mcs - 1;
		} else {
			sinfo->txrate.mcs = mcs;
		}
		sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
	} else if (is_rate_ht40(rate, &mcs, &sgi)) {
		if (sgi) {
			sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
			sinfo->txrate.mcs = mcs - 1;
		} else {
			sinfo->txrate.mcs = mcs;
		}
		sinfo->txrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
		sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
	} else {
		/* unknown code: report what we have, minus the bitrate */
		ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
			   "invalid rate from stats: %d\n", rate);
		ath6kl_debug_war(ar, ATH6KL_WAR_INVALID_RATE);
		return 0;
	}

	sinfo->filled |= STATION_INFO_TX_BITRATE;

	if (test_bit(CONNECTED, &ar->flag) &&
	    test_bit(DTIM_PERIOD_AVAIL, &ar->flag) &&
	    ar->nw_type == INFRA_NETWORK) {
		sinfo->filled |= STATION_INFO_BSS_PARAM;
		sinfo->bss_param.flags = 0;
		sinfo->bss_param.dtim_period = ar->assoc_bss_dtim_period;
		sinfo->bss_param.beacon_interval = ar->assoc_bss_beacon_int;
	}

	return 0;
}
static ssize_t ssp_test_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct ssp_test_driver *drv = file->private_data; struct slots_buf *sb = &drv->rx; ssize_t retval = 0; int c; DENTER(); if (!drv || !sb || !sb->buffer) { dev_dbg(TEST_DEV, "speech: invalid descriptor in read()\n"); DLEAVE(-EFAULT); return -EFAULT; } /* * If we can't consume fast enough => drop 1 to be read sample * we can't block the modem NB: when num_write wraps around, * we add 4 to both num_write & num_read so we always ensure * num_write >= num_read */ if (sb->num_write == (sb->num_read + BT_PCM_NB_SLOTS)) sb->num_read++; /* * Block Until the end of the Read DMA * Do not wait for interrupt, we get the data manually in * logs (brute method). */ c = wait_event_interruptible_timeout(sb->wait, !sb->dma_running, OPERATION_TIMEOUT); if (c == -ERESTARTSYS) { /* Signal interruption occured */ DLEAVE(c); return c; } if (c == 0) { /* Timeout occured */ dev_warn(TEST_DEV, "read: Timeout occured\n"); intel_mid_i2s_command(handle_ssp1, SSP_CMD_ABORT, NULL); } intel_mid_i2s_command(handle_ssp1, SSP_CMD_DISABLE_SSP, NULL); /* Read ended correctly */ dev_dbg(TEST_DEV, "read(r=%d,w=%d): copy_to_user(%d) %d bytes to %p\n", sb->num_read, sb->num_write, sb->num_read, BT_PCM_SLOT_SIZE, &sb->buffer[IDX_NUM_BYTE(sb->num_read)]); retval = copy_to_user((void __user *)buf, &sb->buffer[IDX_NUM_BYTE(sb->num_read)], BT_PCM_SLOT_SIZE); spin_lock_bh(&sb->lock); /* TODO useless spin_lock */ sb->num_read++; spin_unlock_bh(&sb->lock); DLEAVE(retval); return retval; }
/*
 * jpeg_dec_ioctl - command dispatcher for the JPEG decoder driver.
 *
 * file->private_data holds a per-process status word; JPEG_DEC_PROCESS
 * marks the process that currently owns the decoder.  The global
 * dec_status appears to be the hardware-lock flag — TODO confirm where
 * it is set/cleared (outside this view).
 *
 * NOTE(review): several message string literals below contain a raw
 * line break (inherited from the original source); preserved as-is.
 */
static int jpeg_dec_ioctl(unsigned int cmd, unsigned long arg, struct file *file)
{
	unsigned int* pStatus;
	unsigned int decResult;
	long timeout_jiff;
	JPEG_DEC_DRV_IN dec_params;
	JPEG_DEC_CONFIG_ROW dec_row_params ;
	unsigned int irq_st = 0;
	//unsigned int timeout = 0x1FFFFF;
	JPEG_DEC_DRV_OUT outParams;

	pStatus = (unsigned int*)file->private_data;
	if (NULL == pStatus) {
		JPEG_MSG("[JPEGDRV]JPEG Decoder: Private data is null in flush operation. SOME THING WRONG??\n");
		return -EFAULT;
	}
	switch(cmd)
	{
		// initial and reset JPEG encoder
		case JPEG_DEC_IOCTL_INIT: /* OT:OK */
			/* Take ownership only if the hardware init succeeds. */
			JPEG_MSG("[JPEGDRV][IOCTL] JPEG Decoder Init!!\n");
			if(jpeg_drv_dec_init() == 0)
			{
				*pStatus = JPEG_DEC_PROCESS;
			}
			break;

		case JPEG_DEC_IOCTL_RESET: /* OT:OK */
			JPEG_MSG("[JPEGDRV][IOCTL] JPEG Decoder Reset!!\n");
			jpeg_drv_dec_reset();
			break;

		case JPEG_DEC_IOCTL_CONFIG:
			/* Copy the full decode configuration from user space
			 * and push it to the hardware.  Requires ownership
			 * (JPEG_DEC_PROCESS) and a locked decoder. */
			JPEG_MSG("[JPEGDRV][IOCTL] JPEG Decoder Configration!!\n");
			if(*pStatus != JPEG_DEC_PROCESS)
			{
				JPEG_MSG("[JPEGDRV]Permission Denied! This process can not access decoder\n");
				return -EFAULT;
			}
			if(dec_status == 0)
			{
				JPEG_MSG("[JPEGDRV]JPEG Decoder is unlocked!!");
				*pStatus = 0;
				return -EFAULT;
			}
			if(copy_from_user(&dec_params, (void *)arg, sizeof(JPEG_DEC_DRV_IN)))
			{
				JPEG_MSG("[JPEGDRV]JPEG Decoder : Copy from user error\n");
				return -EFAULT;
			}
			//_jpeg_dec_dump_reg_en = dec_params.regDecDumpEn;
			/* Select MCU-row (paused/banked) decode vs. whole-frame. */
			if(dec_params.decodeMode == JPEG_DEC_MODE_MCU_ROW)
				_jpeg_dec_mode = 1;
			else
				_jpeg_dec_mode = 0;
			if (jpeg_drv_dec_set_config_data(&dec_params) < 0)
				return -EFAULT;
			break;

		case JPEG_DEC_IOCTL_RESUME:
			/* MCU-row mode: supply the next row buffers and
			 * resume a paused decode. */
			if(*pStatus != JPEG_DEC_PROCESS)
			{
				JPEG_MSG("[JPEGDRV]Permission Denied! 
This process can not access decoder\n");
				return -EFAULT;
			}
			if(dec_status == 0)
			{
				JPEG_MSG("[JPEGDRV]JPEG Decoder is unlocked!!");
				*pStatus = 0;
				return -EFAULT;
			}
			if(copy_from_user(&dec_row_params, (void *)arg, sizeof(JPEG_DEC_CONFIG_ROW)))
			{
				JPEG_MSG("[JPEGDRV]JPEG Decoder : Copy from user error\n");
				return -EFAULT;
			}
			JPEG_MSG("[JPEGDRV][IOCTL] JPEG Decoder Resume, [%d] %x %x %x !!\n", dec_row_params.pauseMCU -1,dec_row_params.decRowBuf[0], dec_row_params.decRowBuf[1], dec_row_params.decRowBuf[2]);
			jpeg_drv_dec_set_dst_bank0( dec_row_params.decRowBuf[0], dec_row_params.decRowBuf[1], dec_row_params.decRowBuf[2]);
			/* pauseMCU is 1-based from user space; hardware index is 0-based. */
			jpeg_drv_dec_set_pause_mcu_idx(dec_row_params.pauseMCU -1) ;
			jpeg_drv_dec_resume(BIT_INQST_MASK_PAUSE);
			break;

		case JPEG_DEC_IOCTL_START: /* OT:OK */
			JPEG_MSG("[JPEGDRV][IOCTL] JPEG Decoder Start!!\n");
			//Debug: printk("0xF0: 0x%08x\n", *(volatile unsigned int*)(JPEG_DEC_BASE + 0xF0));
			jpeg_drv_dec_start();
			break;

		case JPEG_DEC_IOCTL_WAIT:
			/* Block until the decode IRQ fires (or poll on FPGA
			 * builds), then report result | (irq status << 8)
			 * back through outParams.result. */
			if(*pStatus != JPEG_DEC_PROCESS)
			{
				JPEG_WRN("Permission Denied! 
This process can not access decoder");
				return -EFAULT;
			}
			if(dec_status == 0)
			{
				JPEG_WRN("Decoder status is available, HOW COULD THIS HAPPEN ??");
				*pStatus = 0;
				return -EFAULT;
			}
			if(copy_from_user(&outParams, (void *)arg, sizeof(JPEG_DEC_DRV_OUT)))
			{
				JPEG_WRN("JPEG Decoder : Copy from user error\n");
				return -EFAULT;
			}
			//set timeout
			/* user timeout is in milliseconds; convert to jiffies */
			timeout_jiff = outParams.timeout* HZ / 1000;
			//JPEG_MSG("[JPEGDRV][IOCTL] JPEG Decoder Wait Resume Time Jiffies : %ld\n", timeout_jiff);
#ifdef FPGA_VERSION
//#if 1
			/* FPGA has no working IRQ: busy-poll the status register. */
			JPEG_MSG("[JPEGDRV]Polling JPEG Status");
			do
			{
				_jpeg_dec_int_status = REG_JPGDEC_INTERRUPT_STATUS;
			} while(_jpeg_dec_int_status == 0);
#else
			//if(outParams.timeout >= 5000){
			//
			//	JPEG_MSG("Polling JPEG Status");
			//	do
			//	{
			//		_jpeg_dec_int_status = REG_JPGDEC_INTERRUPT_STATUS;
			//		timeout--;
			//	} while(_jpeg_dec_int_status == 0 && timeout != 0);
			//	if(timeout == 0) JPEG_MSG("Polling JPEG Status TIMEOUT!!\n");
			//}else
			/* jpeg_isr_dec_lisr() < 0 appears to mean "IRQ not yet
			 * taken" — TODO confirm against the ISR. */
			if(jpeg_isr_dec_lisr()<0)
			{
				//JPEG_MSG("wait JPEG irq\n");
				wait_event_interruptible_timeout(dec_wait_queue, _jpeg_dec_int_status, timeout_jiff);
				JPEG_MSG("[JPEGDRV][IOCTL] JPEG Decoder Enter IRQ Wait Done!!\n");
				//printk("[JPEGDRV]wait JPEG irq done\n");
			}
			else
			{
				JPEG_MSG("[JPEGDRV][IOCTL] JPEG Decoder Enter IRQ Wait Already Done!!\n");
				//printk("[JPEGDRV]JPEG decoder already done\n");
			}
#endif
			decResult = jpeg_drv_dec_get_result();
			//jpeg_drv_dec_dump_key_reg();
			/* results >= 2 indicate a failure: dump registers and reset */
			if(decResult >= 2)
			{
				JPEG_MSG("[JPEGDRV]Decode Result : %d, status %x!\n", decResult, _jpeg_dec_int_status );
				jpeg_drv_dec_dump_key_reg();
				//jpeg_drv_dec_dump_reg();
				jpeg_drv_dec_reset();
			}
			irq_st = _jpeg_dec_int_status ;
			/* pack IRQ status into the upper byte(s) of the result */
			decResult = decResult | (irq_st<<8) ;
			_jpeg_dec_int_status = 0;
			if(copy_to_user(outParams.result, &decResult, sizeof(unsigned int)))
			{
				JPEG_WRN("JPEG Decoder : Copy to user error (result)\n");
				return -EFAULT;
			}
			break;

		case JPEG_DEC_IOCTL_BREAK:
			if (jpeg_drv_dec_break() < 0)
				return -EFAULT;
			break;

		case JPEG_DEC_IOCTL_DUMP_REG:
			JPEG_MSG("[JPEGDRV][IOCTL] JPEG Decoder DUMP REGISTER !!\n");
			jpeg_drv_dec_dump_reg();
			break;

		case JPEG_DEC_IOCTL_DEINIT:
			/* Release the decoder; requires ownership and lock,
			 * mirroring the CONFIG/WAIT permission checks. */
			JPEG_MSG("[JPEGDRV][IOCTL] JPEG Decoder Deinit !!\n");
			// copy input parameters
			if(*pStatus != JPEG_DEC_PROCESS)
			{
				JPEG_ERR("Permission Denied! This process can not access encoder");
				return -EFAULT;
			}
			if(dec_status == 0)
			{
				JPEG_ERR("Encoder status is available, HOW COULD THIS HAPPEN ??");
				*pStatus = 0;
				return -EFAULT;
			}
			jpeg_drv_dec_deinit();
			*pStatus = 0;
			break;

#ifdef FOR_COMPILE
		case JPEG_DEC_IOCTL_RW_REG: /* OT:OK */
			jpeg_drv_dec_rw_reg();
			break;
#endif
		default:
			JPEG_ERR("JPEG DEC IOCTL NO THIS COMMAND\n");
			break;
	}
	return 0;
}
/*
 * If possible: WRITE slot per slot, except the 1st time where we
 * write 2 slots
 *
 * ssp_test_write - queue BT PCM slots for SSP DMA transmission.
 *
 * Copies up to two slots (first call) or one slot (subsequent calls)
 * from user space into the TX ring, waits for ring space and for the
 * previous DMA to finish before each copy, then kicks the DMA if
 * there is pending data.
 *
 * Returns the number of bytes queued, -EFAULT on a bad descriptor /
 * buffer, or -ERESTARTSYS if interrupted by a signal.
 *
 * Fix vs. the original: &drv->rx / &drv->tx were computed before the
 * NULL check on @drv; the validation now runs first.
 */
static ssize_t ssp_test_write(struct file *file, const char __user *buffer,
			      size_t count, loff_t *ppos)
{
	ssize_t retval = 0;
	struct ssp_test_driver *drv = file->private_data;
	struct slots_buf *sb_tx;
	struct slots_buf *sb_rx;
	const char *p = buffer;
	int nb_slots = 0;
	int i;
	int c;
	unsigned long missing;

	DENTER();

	/* Validate the descriptor BEFORE dereferencing it. */
	if (!drv || !drv->tx.buffer) {
		dev_dbg(TEST_DEV, "speech: invalid descriptor in write()\n");
		DLEAVE(-EFAULT);
		return -EFAULT;
	}
	sb_rx = &drv->rx;
	sb_tx = &drv->tx;

	dev_dbg(TEST_DEV, "write(r=%d, w=%d): ssp_test_write(count=%d, "
		"ppos=%lld)\n",
		sb_tx->num_read, sb_tx->num_write, count, *ppos);

	if (!access_ok(VERIFY_READ, buffer, count)) {
		dev_dbg(TEST_DEV, "speech: invalid UM buffer in write()\n");
		DLEAVE(-EFAULT);
		return -EFAULT;
	}

	/* lock not needed for single writer */
	nb_slots = (count/BT_PCM_SLOT_SIZE);
	if (ssp_test_driver_data.written == 0) {
		/* Prime the ring with two slots on the very first write. */
		nb_slots = min(nb_slots, 2); /* TODO TEMP PWE */
		ssp_test_driver_data.written = 1;
	} else {
		nb_slots = min(nb_slots, 1);
	}

	for (i = 0; i < nb_slots; i++) {
		if (bt_verbose > 1 && !buffer_free(sb_tx))
			dev_dbg(TEST_DEV, "speech: sleeping in write()\n");

		/* sleep if no space for writing data now */
		c = wait_event_interruptible_timeout(sb_tx->wait,
				buffer_free(sb_tx) && !sb_tx->dma_running,
				OPERATION_TIMEOUT);
		if (c == -ERESTARTSYS) {
			/* Signal interruption occured */
			DLEAVE(c);
			return c;
		}
		if (c == 0) {
			/* Timeout occured: abort and return what we queued. */
			dev_warn(TEST_DEV, "write: Timeout occured\n");
			intel_mid_i2s_command(handle_ssp1, SSP_CMD_ABORT,
					      NULL);
			break;
		}

		dev_dbg(TEST_DEV, "write(r=%d,w=%d): copy_from_user %d bytes "
			"to %p\n",
			sb_tx->num_read, sb_tx->num_write, BT_PCM_SLOT_SIZE,
			&sb_tx->buffer[IDX_NUM_BYTE(sb_tx->num_write)]);

		missing = copy_from_user(
				&sb_tx->buffer[IDX_NUM_BYTE(sb_tx->num_write)],
				p, BT_PCM_SLOT_SIZE);
		if (missing != 0) {
			dev_dbg(TEST_DEV, "speech: cannot copy UM data in "
				"write()\n");
			DLEAVE(-EFAULT);
			return -EFAULT;
		}

		spin_lock_bh(&sb_tx->lock);
		sb_tx->num_write++;
		if (0 == sb_tx->num_write) {
			/* wrap, but indexes _unchanged_ thanks to the modulo 4
			 * ensure that we always have: num_write >= num_read */
			sb_tx->num_write += BT_PCM_NB_SLOTS;
			sb_tx->num_read += BT_PCM_NB_SLOTS;
		}
		spin_unlock_bh(&sb_tx->lock);

		p += BT_PCM_SLOT_SIZE;
		retval += BT_PCM_SLOT_SIZE;
	} /* for (i=0; i<nb_slots; i++) */

	/* Drain: wait for any in-flight DMA before kicking a new one. */
	c = wait_event_interruptible_timeout(sb_tx->wait, !sb_tx->dma_running,
					     OPERATION_TIMEOUT);
	if (c == 0) {
		/* Timeout occured */
		dev_warn(TEST_DEV, "write: Timeout occured\n");
		intel_mid_i2s_command(handle_ssp1, SSP_CMD_ABORT, NULL);
	}

	spin_lock_bh(&sb_tx->lock);
	/* DMA transaction initiated here*/
	if (sb_tx->num_write > sb_tx->num_read)
		ssp_test_write_dma_req(sb_tx, sb_rx);
	spin_unlock_bh(&sb_tx->lock);

	DLEAVE(retval);
	return retval;
}
/*
 * q6_evrc_in_read - copy encoded EVRC frames to user space.
 *
 * Cycles through the flow-control buffer ring (EVRC_FC_BUFF_CNT
 * entries), waiting (bounded by EVRC_READ_TIMEOUT) for each buffer to
 * be filled by the producer, copying its actual_size bytes out, then
 * marking it empty again.  The loop exits once the remaining byte
 * count is no larger than the last transfer size.
 *
 * Returns bytes copied, or -ENODEV / -ETIMEDOUT / -EINVAL / -EFAULT /
 * the wait error on failure.
 */
static ssize_t q6_evrc_in_read(struct file *file, char __user *buf,
		size_t count, loff_t *pos)
{
	struct audio_client *ac;
	const char __user *start = buf;
	struct evrc *evrc = file->private_data;
	struct evrc_fc *fc;
	int xfer = 0;
	int res = 0;

	pr_debug("[%s:%s] count = %d\n", __MM_FILE__, __func__, count);
	mutex_lock(&evrc->lock);
	ac = evrc->audio_client;
	if (!ac) {
		res = -ENODEV;
		goto fail;
	}
	fc = evrc->evrc_fc;
	while (count > xfer) {
		/*wait for buffer to full*/
		if (fc->fc_buff[fc->buff_index].empty != 0) {
			res = wait_event_interruptible_timeout(fc->fc_wq,
				(fc->fc_buff[fc->buff_index].empty == 0),
				msecs_to_jiffies(EVRC_READ_TIMEOUT));

			pr_debug("[%s:%s] buff_index = %d\n", __MM_FILE__,
					__func__, fc->buff_index);
			if (res == 0) {
				pr_err("[%s:%s] Timeout!\n", __MM_FILE__,
						__func__);
				res = -ETIMEDOUT;
				goto fail;
			} else if (res < 0) {
				/* interrupted by a signal */
				pr_err("[%s:%s] Returning on Interrupt\n",
					__MM_FILE__, __func__);
				goto fail;
			}
		}

		/*lock the buffer*/
		mutex_lock(&(fc->fc_buff[fc->buff_index].lock));
		xfer = fc->fc_buff[fc->buff_index].actual_size;
		/* frames are copied whole; a too-small user buffer is an error */
		if (xfer > count) {
			mutex_unlock(&(fc->fc_buff[fc->buff_index].lock));
			pr_err("[%s:%s] read failed! byte count too small\n",
					__MM_FILE__, __func__);
			res = -EINVAL;
			goto fail;
		}

		if (copy_to_user(buf, fc->fc_buff[fc->buff_index].data,
					xfer)) {
			mutex_unlock(&(fc->fc_buff[fc->buff_index].lock));
			pr_err("[%s:%s] copy_to_user failed at index %d\n",
					__MM_FILE__, __func__, fc->buff_index);
			res = -EFAULT;
			goto fail;
		}
		buf += xfer;
		count -= xfer;

		/* hand the buffer back to the producer */
		fc->fc_buff[fc->buff_index].empty = 1;
		fc->fc_buff[fc->buff_index].actual_size = 0;

		mutex_unlock(&(fc->fc_buff[fc->buff_index].lock));
		/* advance around the ring */
		++(fc->buff_index);
		if (fc->buff_index >= EVRC_FC_BUFF_CNT)
			fc->buff_index = 0;
	}
	res = buf - start;
fail:
	mutex_unlock(&evrc->lock);
	return res;
}
/*
 * gs_close - tty close() hook for a gadget serial port.
 *
 * Only the final close (open_count == 1) tears the port down; nested
 * closes merely decrement the count.  During teardown the port is
 * flagged "openclose" so a concurrent gs_open() knows teardown is in
 * progress, and port_lock is dropped around the sleeping drain wait.
 */
static void gs_close(struct tty_struct *tty, struct file *file)
{
	struct gs_port *port = tty->driver_data;
	struct gserial	*gser;

	spin_lock_irq(&port->port_lock);

	/* Not the last close: just drop one reference and leave. */
	if (port->open_count != 1) {
		if (port->open_count == 0)
			WARN_ON(1);	/* close without a matching open */
		else
			--port->open_count;
		goto exit;
	}

	pr_vdebug("gs_close: ttyGS%d (%p,%p) ...\n", port->port_num, tty, file);

	/* mark port as closing but in use; we can drop port lock
	 * and sleep if necessary
	 */
	port->openclose = true;
	port->open_count = 0;

	gser = port->port_usb;
#if ACM_TTY_SUPPORT_NOTIFY
	if (gser && gser->disconnect)
		gser->disconnect(gser);
#endif

	/* wait for circular write buffer to drain, disconnect, or at
	 * most GS_CLOSE_TIMEOUT seconds; then discard the rest
	 */
	if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) {
		spin_unlock_irq(&port->port_lock);
		wait_event_interruptible_timeout(port->drain_wait,
					gs_writes_finished(port),
					GS_CLOSE_TIMEOUT * HZ);
		spin_lock_irq(&port->port_lock);
		/* port_usb may have gone away while the lock was dropped */
		gser = port->port_usb;
	}

	/* Iff we're disconnected, there can be no I/O in flight so it's
	 * ok to free the circular buffer; else just scrub it. And don't
	 * let the push tasklet fire again until we're re-opened.
	 */
	if (gser == NULL)
		gs_buf_free(&port->port_write_buf);
	else
		gs_buf_clear(&port->port_write_buf);

	tty->driver_data = NULL;
	port->port_tty = NULL;

	port->openclose = false;

	pr_vdebug("gs_close: ttyGS%d (%p,%p) done!\n",
			port->port_num, tty, file);

	/* wake any gs_open() blocked on this teardown completing */
	wake_up_interruptible(&port->close_wait);
exit:
	spin_unlock_irq(&port->port_lock);
}
/*
 * audpcm_in_read - blocking read of captured PCM frames.
 *
 * Waits (bounded by MSM_AUD_BUFFER_UPDATE_WAIT_MS) for a DMA frame,
 * copies whole frames only, and re-checks the tail index under
 * dsp_lock afterwards to detect an overrun that happened while
 * copy_to_user() ran without the lock held.
 *
 * Returns bytes copied if any, else 0 at EOF (stopped and drained),
 * -ETIMEDOUT, or the wait error on signal.
 */
static ssize_t audpcm_in_read(struct file *file,
				char __user *buf,
				size_t count, loff_t *pos)
{
	struct audio_in *audio = file->private_data;
	unsigned long flags;
	const char __user *start = buf;
	void *data;
	uint32_t index;
	uint32_t size;
	int rc = 0;

	mutex_lock(&audio->read_lock);
	while (count > 0) {
		rc = wait_event_interruptible_timeout(
			audio->wait, (audio->in_count > 0) || audio->stopped,
			msecs_to_jiffies(MSM_AUD_BUFFER_UPDATE_WAIT_MS));

		if (rc == 0) {
			rc = -ETIMEDOUT;
			break;
		} else if (rc < 0) {
			break;
		}

		if (audio->stopped && !audio->in_count) {
			rc = 0;/* End of File */
			break;
		}

		/* snapshot the tail frame; validated again under dsp_lock */
		index = audio->in_tail;
		data = (uint8_t *) audio->in[index].data;
		size = audio->in[index].size;
		if (count >= size) {
			/* order the reads on the buffer */
			dma_coherent_post_ops();
			if (copy_to_user(buf, data, size)) {
				rc = -EFAULT;
				break;
			}
			spin_lock_irqsave(&audio->dsp_lock, flags);
			if (index != audio->in_tail) {
				/* overrun -- data is invalid and we need to
				 * retry
				 */
				spin_unlock_irqrestore(&audio->dsp_lock,
					flags);
				continue;
			}
			/* consume the frame while still holding dsp_lock */
			audio->in[index].size = 0;
			audio->in_tail = (audio->in_tail + 1) &
				(FRAME_NUM - 1);
			audio->in_count--;
			spin_unlock_irqrestore(&audio->dsp_lock, flags);
			count -= size;
			buf += size;
		} else {
			/* caller's buffer is too small for a whole frame */
			MM_ERR("short read\n");
			break;
		}
	}
	mutex_unlock(&audio->read_lock);

	/* partial success wins over a late error code */
	if (buf > start)
		return buf - start;

	return rc;
}
/*
 * iwl_send_cmd_sync - send a host command and block until it completes.
 *
 * Enqueues @cmd on the command queue and sleeps (bounded by
 * HOST_COMPLETE_TIMEOUT) until the response handler clears
 * STATUS_HCMD_ACTIVE.  Serialized by sync_cmd_mutex: one sync command
 * in flight at a time.
 *
 * Returns 0 on success; -ETIMEDOUT, -ECANCELED (rfkill), -EIO
 * (firmware error or missing response), or the enqueue error.
 */
int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	int cmd_idx;
	int ret;

	/* sync semantics are incompatible with async flags/callbacks */
	BUG_ON(cmd->flags & CMD_ASYNC);

	 /* A synchronous command can not have a callback set. */
	BUG_ON(cmd->callback);

	IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
			get_cmd_string(cmd->id));
	mutex_lock(&priv->sync_cmd_mutex);

	set_bit(STATUS_HCMD_ACTIVE, &priv->status);
	IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
			get_cmd_string(cmd->id));

	cmd_idx = iwl_enqueue_hcmd(priv, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
			  get_cmd_string(cmd->id), ret);
		goto out;
	}

	/* woken by the RX path when the command response arrives */
	ret = wait_event_interruptible_timeout(priv->wait_command_queue,
			!test_bit(STATUS_HCMD_ACTIVE, &priv->status),
			HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		/* re-check: the response may have just raced the timeout */
		if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
			IWL_ERR(priv,
				"Error sending %s: time out after %dms.\n",
				get_cmd_string(cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
			IWL_DEBUG_INFO(priv,
				"Clearing HCMD_ACTIVE for command %s\n",
				       get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
		IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
			       get_cmd_string(cmd->id));
		ret = -ECANCELED;
		goto fail;
	}
	if (test_bit(STATUS_FW_ERROR, &priv->status)) {
		IWL_ERR(priv, "Command %s failed: FW Error\n",
			       get_cmd_string(cmd->id));
		ret = -EIO;
		goto fail;
	}
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
		IWL_ERR(priv, "Error: Response NULL in '%s'\n",
			  get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	ret = 0;
	goto out;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_idx].flags &=
							~CMD_WANT_SKB;
	}
fail:
	/* release any response page the firmware handed back */
	if (cmd->reply_page) {
		iwl_free_pages(priv, cmd->reply_page);
		cmd->reply_page = 0;
	}
out:
	mutex_unlock(&priv->sync_cmd_mutex);
	return ret;
}
/*
 * vocpcm_ioctl - register/unregister a voice-PCM client.
 *
 * On unregister: if this is a "write" context (ctxt->intr odd —
 * presumably odd interrupt ids are playback; verify against the
 * driver's intr assignment), any partially filled frame is zero-padded,
 * queued as the final input, and we wait up to 5s for the DSP to drain
 * it.  Otherwise the blocked reader is woken so it can exit.
 */
static long vocpcm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct voc_ctxt *ctxt = file->private_data;
	struct buffer *frame;
	unsigned long flags;
	uint32_t index;
	uint32_t data_index;
	uint32_t len = 0;
	uint8_t *dest;
	int rc = 0;

	mutex_lock(&ctxt->lock);
	switch (cmd) {
	case VOCPCM_REGISTER_CLIENT:
		rc = voc_register_client(ctxt);
		break;
	case VOCPCM_UNREGISTER_CLIENT:
		if (ctxt->intr % 2) {
			/* s_ptr marks the current write position inside a
			 * partially filled frame (set elsewhere in the file) */
			if (ctxt->s_ptr) {
				index = ctxt->head;
				frame = &ctxt->buf[index];
				data_index = FRAME_NUM * FRAME_SIZE - 1;
				dest = (uint8_t *)&frame->data[data_index] + 1;
				/* zero-fill the remainder of the frame */
				len = dest - ctxt->s_ptr + 1;
				memset(ctxt->s_ptr, 0, len);
				spin_lock_irqsave(&ctxt->dsp_lock, flags);
				frame->index = 0;
				ctxt->head = (ctxt->head + 1) &
						(BUFFER_NUM - 1);
				ctxt->count++;
				ctxt->final_input = 1;
				spin_unlock_irqrestore(&ctxt->dsp_lock,
							flags);
				/* wait (up to 5s) for the DSP to consume the
				 * final frame; a signal aborts the unregister */
				rc = wait_event_interruptible_timeout(
					ctxt->last_write_wait,
					ctxt->count == 0, 5 * HZ);
				if (rc < 0)
					break;
			}
		} else {
			pr_info("voc: stopping vocpcm_read()\n");
			ctxt->final_input = 1;
			wake_up_interruptible(&ctxt->wait);
		}
		if (ctxt->client)
			rc = voc_unregister_client(ctxt);
		else
			pr_err("voc: no %d client to unregister.",
				ctxt->intr);
		break;
	default:
		pr_err("voc: unknown command.\n");
		rc = -EINVAL;
		break;
	}
	mutex_unlock(&ctxt->lock);

	return rc;
}
/*
 * _esparser_write - feed one chunk of an elementary stream to the
 * hardware ES parser.
 *
 * Copies at most FETCHBUF_SIZE bytes from user space into the fetch
 * buffer, programs the parser to consume it, then queues a search
 * pattern fetch and waits (up to HZ/10) for the ISR to set
 * search_done.  On timeout, forward progress is detected by comparing
 * the buffer read pointer before and after.
 *
 * Returns bytes consumed, -EFAULT on a bad copy, -EAGAIN if nothing
 * was fetched before the timeout, or -ERESTARTSYS on signal.
 */
static ssize_t _esparser_write(const char __user *buf,
				size_t count,
				u32 type)
{
	size_t r = count;
	const char __user *p = buf;
	u32 len = 0;
	u32 parser_type;
	int ret;
	u32 wp;

	if (type == BUF_TYPE_VIDEO) {
		parser_type = PARSER_VIDEO;
	} else if (type == BUF_TYPE_AUDIO) {
		parser_type = PARSER_AUDIO;
	} else {
		parser_type = PARSER_SUBPIC;
	}

	/* remember the read pointer to detect progress on timeout */
	wp = buf_rp(type);

	if (r > 0) {
		len = min(r, (size_t)FETCHBUF_SIZE);

		if (copy_from_user(fetchbuf_remap, p, len)) {
			return -EFAULT;
		}

		/* make the copied data visible before starting the fetch */
		wmb();

		// reset the Write and read pointer to zero again
		WRITE_MPEG_REG(PFIFO_RD_PTR, 0);
		WRITE_MPEG_REG(PFIFO_WR_PTR, 0);

		WRITE_MPEG_REG_BITS(PARSER_CONTROL, len, ES_PACK_SIZE_BIT, ES_PACK_SIZE_WID);
		WRITE_MPEG_REG_BITS(PARSER_CONTROL,
				parser_type | PARSER_WRITE | PARSER_AUTOSEARCH,
				ES_CTRL_BIT, ES_CTRL_WID);

		/* NOTE(review): data is copied through fetchbuf_remap but the
		 * fetch address is virt_to_phys(fetchbuf) — presumably both
		 * name the same underlying buffer; confirm at the allocation
		 * site. */
		WRITE_MPEG_REG(PARSER_FETCH_ADDR, virt_to_phys((u8 *)fetchbuf));
		WRITE_MPEG_REG(PARSER_FETCH_CMD, (7 << FETCH_ENDIAN) | len);

		search_done = 0;

		/* queue the search pattern; the ISR sets search_done */
		WRITE_MPEG_REG(PARSER_FETCH_ADDR, search_pattern_map);
		WRITE_MPEG_REG(PARSER_FETCH_CMD, (7 << FETCH_ENDIAN) | SEARCH_PATTERN_LEN);

		ret = wait_event_interruptible_timeout(wq, search_done != 0, HZ/10);
		if (ret == 0) {
			/* cancel the outstanding fetch on timeout */
			WRITE_MPEG_REG(PARSER_FETCH_CMD, 0);
			if (wp == buf_rp(type)) { /*no data fetched*/
				return -EAGAIN;
			} else {
				printk("write timeout, but fetched ok,len=%d,wpdiff=%d\n", len, wp - buf_rp(type));
			}
		} else if (ret < 0) {
			return -ERESTARTSYS;
		}
	}

	/* account the consumed bytes against the right stream */
	if (type == BUF_TYPE_VIDEO) {
		video_data_parsed += len;
	} else if (type == BUF_TYPE_AUDIO) {
		audio_data_parsed += len;
	}

	return len;
}