static void mtd_ipanic_block_erase(void)
{
	struct mtd_ipanic_data *ctx = &mtd_drv_ctx;
	struct erase_info erase;
	DECLARE_WAITQUEUE(wait, current);
	wait_queue_head_t wait_q;
	int rc, i;

	init_waitqueue_head(&wait_q);
	erase.mtd = ctx->mtd;
	erase.callback = mtd_ipanic_block_erase_callback;
	erase.len = ctx->mtd->erasesize;
	erase.priv = (u_long)&wait_q;
	for (i = 0; i < ctx->mtd->size; i += ctx->mtd->erasesize) {
		erase.addr = i;
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&wait_q, &wait);

		rc = ctx->mtd->_block_isbad(ctx->mtd, erase.addr);
		if (rc < 0) {
			xlog_printk(ANDROID_LOG_ERROR, IPANIC_LOG_TAG,
				    "aee-ipanic: Bad block check failed (%d)\n",
				    rc);
			goto out;
		}
		if (rc) {
			xlog_printk(ANDROID_LOG_WARN, IPANIC_LOG_TAG,
				    "aee-ipanic: Skipping erase of bad block @%llx\n",
				    erase.addr);
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&wait_q, &wait);
			continue;
		}

		rc = ctx->mtd->_erase(ctx->mtd, &erase);
		if (rc) {
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&wait_q, &wait);
			xlog_printk(ANDROID_LOG_ERROR, IPANIC_LOG_TAG,
				    "aee-ipanic: Erase of 0x%llx, 0x%llx failed\n",
				    (unsigned long long)erase.addr,
				    (unsigned long long)erase.len);
			if (rc == -EIO) {
				if (ctx->mtd->_block_markbad(ctx->mtd,
							     erase.addr)) {
					xlog_printk(ANDROID_LOG_ERROR,
						    IPANIC_LOG_TAG,
						    "aee-ipanic: Err marking blk bad\n");
					goto out;
				}
				xlog_printk(ANDROID_LOG_INFO, IPANIC_LOG_TAG,
					    "aee-ipanic: Marked a bad block @%llx\n",
					    erase.addr);
				continue;
			}
			goto out;
		}

		/* Sleep until the erase callback wakes us via wait_q. */
		schedule();
		remove_wait_queue(&wait_q, &wait);
	}
	xlog_printk(ANDROID_LOG_DEBUG, IPANIC_LOG_TAG,
		    "aee-ipanic: %s partition erased\n", AEE_IPANIC_PLABEL);
out:
	return;
}
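/*
 * The functions collected in this file all share the same open-coded
 * sleep/wake idiom.  The sketch below is illustrative only -- it is not
 * taken from any of the drivers here, and the names are made up -- but it
 * shows the bare pattern: mark the task sleeping *before* testing the
 * condition so a wake-up arriving between the test and schedule() is not
 * lost, then restore TASK_RUNNING and detach the waiter on every exit path.
 */
static int example_wait_for_condition(wait_queue_head_t *wq, int *cond)
{
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	add_wait_queue(wq, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (*cond)		/* condition already satisfied */
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();		/* really sleep; wake_up() restarts us */
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(wq, &wait);
	return ret;
}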
/**
 * n_hdlc_tty_write - write a single frame of data to device
 * @tty - pointer to associated tty device instance data
 * @file - pointer to file object data
 * @data - pointer to transmit data (one frame)
 * @count - size of transmit frame in bytes
 *
 * Returns the number of bytes written (or error code).
 */
static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
				const unsigned char *data, size_t count)
{
	struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
	int error = 0;
	DECLARE_WAITQUEUE(wait, current);
	struct n_hdlc_buf *tbuf;

	if (debuglevel >= DEBUG_LEVEL_INFO)
		printk("%s(%d)n_hdlc_tty_write() called count=%zd\n",
			__FILE__,__LINE__,count);

	/* Verify pointers */
	if (!n_hdlc)
		return -EIO;

	if (n_hdlc->magic != HDLC_MAGIC)
		return -EIO;

	/* verify frame size */
	if (count > maxframe) {
		if (debuglevel & DEBUG_LEVEL_INFO)
			printk (KERN_WARNING
				"n_hdlc_tty_write: truncating user packet "
				"from %lu to %d\n", (unsigned long) count,
				maxframe);
		count = maxframe;
	}

	add_wait_queue(&tty->write_wait, &wait);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		tbuf = n_hdlc_buf_get(&n_hdlc->tx_free_buf_list);
		if (tbuf)
			break;

		if (tty_io_nonblock(tty, file)) {
			error = -EAGAIN;
			break;
		}
		schedule();

		n_hdlc = tty2n_hdlc (tty);
		if (!n_hdlc || n_hdlc->magic != HDLC_MAGIC ||
		    tty != n_hdlc->tty) {
			printk("n_hdlc_tty_write: %p invalid after wait!\n",
				n_hdlc);
			error = -EIO;
			break;
		}

		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&tty->write_wait, &wait);

	if (!error) {
		/* Retrieve the user's buffer */
		memcpy(tbuf->buf, data, count);

		/* Send the data */
		tbuf->count = error = count;
		n_hdlc_buf_put(&n_hdlc->tx_buf_list,tbuf);
		n_hdlc_send_frames(n_hdlc,tty);
	}

	return error;

}	/* end of n_hdlc_tty_write() */
static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
			  unsigned char __user *buf, size_t nr)
{
	unsigned char __user *b = buf;
	DECLARE_WAITQUEUE(wait, current);
	int c;
	int minimum, time;
	ssize_t retval = 0;
	ssize_t size;
	long timeout;
	unsigned long flags;
	int packet;

do_it_again:

	if (WARN_ON(!tty->read_buf))
		return -EAGAIN;

	c = job_control(tty, file);
	if (c < 0)
		return c;

	minimum = time = 0;
	timeout = MAX_SCHEDULE_TIMEOUT;
	if (!tty->icanon) {
		time = (HZ / 10) * TIME_CHAR(tty);
		minimum = MIN_CHAR(tty);
		if (minimum) {
			if (time)
				tty->minimum_to_wake = 1;
			else if (!waitqueue_active(&tty->read_wait) ||
				 (tty->minimum_to_wake > minimum))
				tty->minimum_to_wake = minimum;
		} else {
			timeout = 0;
			if (time) {
				timeout = time;
				time = 0;
			}
			tty->minimum_to_wake = minimum = 1;
		}
	}

	if (file->f_flags & O_NONBLOCK) {
		if (!mutex_trylock(&tty->atomic_read_lock))
			return -EAGAIN;
	} else {
		if (mutex_lock_interruptible(&tty->atomic_read_lock))
			return -ERESTARTSYS;
	}
	packet = tty->packet;

	add_wait_queue(&tty->read_wait, &wait);
	while (nr) {
		if (packet && tty->link->ctrl_status) {
			unsigned char cs;
			if (b != buf)
				break;
			spin_lock_irqsave(&tty->link->ctrl_lock, flags);
			cs = tty->link->ctrl_status;
			tty->link->ctrl_status = 0;
			spin_unlock_irqrestore(&tty->link->ctrl_lock, flags);
			if (tty_put_user(tty, cs, b++)) {
				retval = -EFAULT;
				b--;
				break;
			}
			nr--;
			break;
		}
		set_current_state(TASK_INTERRUPTIBLE);

		if (((minimum - (b - buf)) < tty->minimum_to_wake) &&
		    ((minimum - (b - buf)) >= 1))
			tty->minimum_to_wake = (minimum - (b - buf));

		if (!input_available_p(tty, 0)) {
			if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
				retval = -EIO;
				break;
			}
			if (tty_hung_up_p(file))
				break;
			if (!timeout)
				break;
			if (file->f_flags & O_NONBLOCK) {
				retval = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				retval = -ERESTARTSYS;
				break;
			}
			n_tty_set_room(tty);
			timeout = schedule_timeout(timeout);
			BUG_ON(!tty->read_buf);
			continue;
		}
		__set_current_state(TASK_RUNNING);

		if (packet && b == buf) {
			if (tty_put_user(tty, TIOCPKT_DATA, b++)) {
				retval = -EFAULT;
				b--;
				break;
			}
			nr--;
		}

		if (tty->icanon && !L_EXTPROC(tty)) {
			while (nr && tty->read_cnt) {
				int eol;

				eol = test_and_clear_bit(tty->read_tail,
							 tty->read_flags);
				c = tty->read_buf[tty->read_tail];
				spin_lock_irqsave(&tty->read_lock, flags);
				tty->read_tail = ((tty->read_tail+1) &
						  (N_TTY_BUF_SIZE-1));
				tty->read_cnt--;
				if (eol) {
					if (--tty->canon_data < 0)
						tty->canon_data = 0;
				}
				spin_unlock_irqrestore(&tty->read_lock, flags);

				if (!eol || (c != __DISABLED_CHAR)) {
					if (tty_put_user(tty, c, b++)) {
						retval = -EFAULT;
						b--;
						break;
					}
					nr--;
				}
				if (eol) {
					tty_audit_push(tty);
					break;
				}
			}
			if (retval)
				break;
		} else {
			int uncopied;

			uncopied = copy_from_read_buf(tty, &b, &nr);
			uncopied += copy_from_read_buf(tty, &b, &nr);
			if (uncopied) {
				retval = -EFAULT;
				break;
			}
		}

		if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) {
			n_tty_set_room(tty);
			check_unthrottle(tty);
		}

		if (b - buf >= minimum)
			break;
		if (time)
			timeout = time;
	}
	mutex_unlock(&tty->atomic_read_lock);
	remove_wait_queue(&tty->read_wait, &wait);

	if (!waitqueue_active(&tty->read_wait))
		tty->minimum_to_wake = minimum;

	__set_current_state(TASK_RUNNING);
	size = b - buf;
	if (size) {
		retval = size;
		if (nr)
			clear_bit(TTY_PUSH, &tty->flags);
	} else if (test_and_clear_bit(TTY_PUSH, &tty->flags))
		goto do_it_again;

	n_tty_set_room(tty);
	return retval;
}
static int mtd_ioctl(struct inode *inode, struct file *file,
		     u_int cmd, u_long arg)
{
	struct mtd_info *mtd = (struct mtd_info *)file->private_data;
	int ret = 0;
	u_long size;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");

	size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
	if (cmd & IOC_IN) {
		ret = verify_area(VERIFY_READ, (char *)arg, size);
		if (ret)
			return ret;
	}
	if (cmd & IOC_OUT) {
		ret = verify_area(VERIFY_WRITE, (char *)arg, size);
		if (ret)
			return ret;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user((int *)arg, &(mtd->numeraseregions),
				 sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		struct region_info_user ur;

		if (copy_from_user(&ur, (struct region_info_user *)arg,
				   sizeof(struct region_info_user)))
			return -EFAULT;

		if (ur.regionindex >= mtd->numeraseregions)
			return -EINVAL;
		if (copy_to_user((struct mtd_erase_region_info *)arg,
				 &(mtd->eraseregions[ur.regionindex]),
				 sizeof(struct mtd_erase_region_info)))
			return -EFAULT;
		break;
	}

	case MEMGETINFO:
		if (copy_to_user((struct mtd_info *)arg, mtd,
				 sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

	case MEMERASE:
	{
		struct erase_info *erase =
			kmalloc(sizeof(struct erase_info), GFP_KERNEL);

		if (!erase)
			ret = -ENOMEM;
		else {
			wait_queue_head_t waitq;
			DECLARE_WAITQUEUE(wait, current);

			init_waitqueue_head(&waitq);

			memset(erase, 0, sizeof(struct erase_info));
			if (copy_from_user(&erase->addr, (u_long *)arg,
					   2 * sizeof(u_long))) {
				kfree(erase);
				return -EFAULT;
			}
			erase->mtd = mtd;
			erase->callback = mtd_erase_callback;
			erase->priv = (unsigned long)&waitq;

			/* FIXME: Allow INTERRUPTIBLE. Which means not having
			   the wait_queue head on the stack.

			   If the wq_head is on the stack, and we leave
			   because we got interrupted, then the wq_head is no
			   longer there when the callback routine tries to
			   wake us up. */
			ret = mtd->erase(mtd, erase);
			if (!ret) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&waitq, &wait);
				if (erase->state != MTD_ERASE_DONE &&
				    erase->state != MTD_ERASE_FAILED)
					schedule();
				remove_wait_queue(&waitq, &wait);
				set_current_state(TASK_RUNNING);

				ret = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
			}
			kfree(erase);
		}
		break;
	}

	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		void *databuf;
		ssize_t retlen;

		if (copy_from_user(&buf, (struct mtd_oob_buf *)arg,
				   sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 0x4096)
			return -EINVAL;

		if (!mtd->write_oob)
			ret = -EOPNOTSUPP;
		else
			ret = verify_area(VERIFY_READ, (char *)buf.ptr,
					  buf.length);
		if (ret)
			return ret;

		databuf = kmalloc(buf.length, GFP_KERNEL);
		if (!databuf)
			return -ENOMEM;

		if (copy_from_user(databuf, buf.ptr, buf.length)) {
			kfree(databuf);
			return -EFAULT;
		}

		ret = (mtd->write_oob)(mtd, buf.start, buf.length,
				       &retlen, databuf);

		if (copy_to_user((void *)arg + sizeof(u_int32_t), &retlen,
				 sizeof(u_int32_t)))
			ret = -EFAULT;

		kfree(databuf);
		break;
	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		void *databuf;
		ssize_t retlen;

		if (copy_from_user(&buf, (struct mtd_oob_buf *)arg,
				   sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 0x4096)
			return -EINVAL;

		if (!mtd->read_oob)
			ret = -EOPNOTSUPP;
		else
			ret = verify_area(VERIFY_WRITE, (char *)buf.ptr,
					  buf.length);
		if (ret)
			return ret;

		databuf = kmalloc(buf.length, GFP_KERNEL);
		if (!databuf)
			return -ENOMEM;

		ret = (mtd->read_oob)(mtd, buf.start, buf.length,
				      &retlen, databuf);

		if (copy_to_user((void *)arg + sizeof(u_int32_t), &retlen,
				 sizeof(u_int32_t)))
			ret = -EFAULT;
		else if (retlen && copy_to_user(buf.ptr, databuf, retlen))
			ret = -EFAULT;

		kfree(databuf);
		break;
	}

	case MEMLOCK:
	{
		unsigned long adrs[2];

		if (copy_from_user(adrs, (void *)arg,
				   2 * sizeof(unsigned long)))
			return -EFAULT;

		if (!mtd->lock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->lock(mtd, adrs[0], adrs[1]);
		break;
	}

	case MEMUNLOCK:
	{
		unsigned long adrs[2];

		if (copy_from_user(adrs, (void *)arg,
				   2 * sizeof(unsigned long)))
			return -EFAULT;

		if (!mtd->unlock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->unlock(mtd, adrs[0], adrs[1]);
		break;
	}

	default:
		DEBUG(MTD_DEBUG_LEVEL0, "Invalid ioctl %x (MEMGETINFO = %x)\n",
		      cmd, MEMGETINFO);
		ret = -ENOTTY;
	}

	return ret;
} /* memory_ioctl */
int cx18_stop_v4l2_encode_stream(struct cx18_stream *s, int gop_end)
{
	struct cx18 *cx = s->cx;
	unsigned long then;

	if (!cx18_stream_enabled(s))
		return -EINVAL;

	/* This function assumes that you are allowed to stop the capture
	   and that we are actually capturing */

	CX18_DEBUG_INFO("Stop Capture\n");

	if (atomic_read(&cx->tot_capturing) == 0)
		return 0;

	set_bit(CX18_F_S_STOPPING, &s->s_flags);
	if (s->type == CX18_ENC_STREAM_TYPE_MPG)
		cx18_vapi(cx, CX18_CPU_CAPTURE_STOP, 2, s->handle, !gop_end);
	else
		cx18_vapi(cx, CX18_CPU_CAPTURE_STOP, 1, s->handle);

	then = jiffies;

	if (s->type == CX18_ENC_STREAM_TYPE_MPG && gop_end) {
#if 0
		/* only run these if we're shutting down the last cap */
		DECLARE_WAITQUEUE(wait, current);
		unsigned long duration;

		then = jiffies;
		add_wait_queue(&cx->cap_w, &wait);

		set_current_state(TASK_INTERRUPTIBLE);

		/* TODO: wait 2s for EOS interrupt */
		while (!test_bit(CX18_F_I_EOS, &cx->i_flags) &&
		       time_before(jiffies, then + msecs_to_jiffies(2000)))
			schedule_timeout(msecs_to_jiffies(10));

		duration = jiffies_to_msecs(jiffies - then);

		if (!test_bit(CX18_F_I_EOS, &cx->i_flags)) {
			CX18_DEBUG_WARN("%s: EOS interrupt not received! stopping anyway.\n",
					s->name);
			CX18_DEBUG_WARN("%s: waited %lu ms.\n",
					s->name, duration);
		} else {
			CX18_DEBUG_INFO("%s: EOS took %lu ms to occur.\n",
					s->name, duration);
		}
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&cx->cap_w, &wait);
#else
		CX18_INFO("ignoring gop_end: not (yet?) supported by the firmware\n");
#endif
	}

	if (s->type != CX18_ENC_STREAM_TYPE_TS)
		atomic_dec(&cx->ana_capturing);
	atomic_dec(&cx->tot_capturing);

	/* Clear capture and no-read bits */
	clear_bit(CX18_F_S_STREAMING, &s->s_flags);

	/* Tell the CX23418 it can't use our buffers anymore */
	cx18_vapi(cx, CX18_CPU_DE_RELEASE_MDL, 1, s->handle);

	cx18_vapi(cx, CX18_DESTROY_TASK, 1, s->handle);
	s->handle = CX18_INVALID_TASK_HANDLE;

	clear_bit(CX18_F_S_STOPPING, &s->s_flags);
	if (atomic_read(&cx->tot_capturing) > 0)
		return 0;

	cx18_write_reg(cx, 5, CX18_DSP0_INTERRUPT_MASK);
	wake_up(&s->waitq);

	return 0;
}
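/*
 * Aside (illustrative, not part of the driver): the #if 0 block above
 * open-codes a bounded wait for the EOS flag.  On kernels that provide
 * wait_event_timeout(), the same 2-second bounded wait collapses to:
 *
 *	unsigned long left = wait_event_timeout(cx->cap_w,
 *			test_bit(CX18_F_I_EOS, &cx->i_flags),
 *			msecs_to_jiffies(2000));
 *
 * where a zero return means the timeout elapsed without EOS being seen.
 */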
/*
========================================================================
Routine Description:
	Close raxx interface.

Arguments:
	*net_dev			the raxx interface pointer

Return Value:
	0					Open OK
	otherwise			Open Fail

Note:
	1. if open fail, kernel will not call the close function.
	2. Free memory for
		(1) Mlme Memory Handler:	MlmeHalt()
		(2) TX & RX:			RTMPFreeTxRxRingMemory()
		(3) BA Reordering:		ba_reordering_resource_release()
========================================================================
*/
int rt28xx_close(struct net_device *dev)
{
	struct net_device *net_dev = (struct net_device *)dev;
	struct rt_rtmp_adapter *pAd = NULL;
	BOOLEAN Cancelled;
	u32 i = 0;

#ifdef RTMP_MAC_USB
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
#endif /* RTMP_MAC_USB // */

	GET_PAD_FROM_NET_DEV(pAd, net_dev);

	DBGPRINT(RT_DEBUG_TRACE, ("===> rt28xx_close\n"));

	Cancelled = FALSE;
	/* Sanity check for pAd */
	if (pAd == NULL)
		return 0;	/* close ok */

	{
#ifdef RTMP_MAC_PCI
		RTMPPCIeLinkCtrlValueRestore(pAd, RESTORE_CLOSE);
#endif /* RTMP_MAC_PCI // */

		/* If the driver doesn't wake up firmware here, */
		/* NICLoadFirmware will hang forever when the interface is up again. */
		if (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_DOZE))
			AsicForceWakeup(pAd, TRUE);

#ifdef RTMP_MAC_USB
		RTMP_SET_FLAG(pAd, fRTMP_ADAPTER_REMOVE_IN_PROGRESS);
#endif /* RTMP_MAC_USB // */

		MlmeRadioOff(pAd);
#ifdef RTMP_MAC_PCI
		pAd->bPCIclkOff = FALSE;
#endif /* RTMP_MAC_PCI // */
	}

	RTMP_SET_FLAG(pAd, fRTMP_ADAPTER_HALT_IN_PROGRESS);

	for (i = 0; i < NUM_OF_TX_RING; i++) {
		while (pAd->DeQueueRunning[i] == TRUE) {
			DBGPRINT(RT_DEBUG_TRACE,
				 ("Waiting for TxQueue[%d] done..........\n",
				  i));
			RTMPusecDelay(1000);
		}
	}

#ifdef RTMP_MAC_USB
	/* ensure there are no more active urbs. */
	add_wait_queue(&unlink_wakeup, &wait);
	pAd->wait = &unlink_wakeup;

	/* maybe wait for deletions to finish. */
	i = 0;
	/*while((i < 25) && atomic_read(&pAd->PendingRx) > 0) */
	while (i < 25) {
		unsigned long IrqFlags;

		RTMP_IRQ_LOCK(&pAd->BulkInLock, IrqFlags);
		if (pAd->PendingRx == 0) {
			RTMP_IRQ_UNLOCK(&pAd->BulkInLock, IrqFlags);
			break;
		}
		RTMP_IRQ_UNLOCK(&pAd->BulkInLock, IrqFlags);

		msleep(UNLINK_TIMEOUT_MS);	/*Time in millisecond */
		i++;
	}
	pAd->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
#endif /* RTMP_MAC_USB // */

	/* Stop Mlme state machine */
	MlmeHalt(pAd);

	/* Close net tasklets */
	RtmpNetTaskExit(pAd);

	{
		MacTableReset(pAd);
	}

	MeasureReqTabExit(pAd);
	TpcReqTabExit(pAd);

	/* Close kernel threads */
	RtmpMgmtTaskExit(pAd);

#ifdef RTMP_MAC_PCI
	{
		BOOLEAN brc;
		/*	unsigned long	Value; */

		if (RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_INTERRUPT_ACTIVE))
			RTMP_ASIC_INTERRUPT_DISABLE(pAd);

		/* Receive packets to clear DMA index after disable interrupt. */
		/*RTMPHandleRxDoneInterrupt(pAd); */
		/* Put the chip into radio-off to save power when the driver
		   unloads.  After radio-off, registers can't be read or
		   written, so finish all register access before radio off. */
		brc = RT28xxPciAsicRadioOff(pAd, RTMP_HALT, 0);

		/*In solution 3 of 3090F, the bPCIclkOff will be set to TRUE
		   after calling RT28xxPciAsicRadioOff */
		pAd->bPCIclkOff = FALSE;

		if (brc == FALSE)
			DBGPRINT(RT_DEBUG_ERROR,
				 ("%s call RT28xxPciAsicRadioOff fail!\n",
				  __func__));
	}

/*
	if (RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_INTERRUPT_ACTIVE))
	{
		RTMP_ASIC_INTERRUPT_DISABLE(pAd);
	}

	// Disable Rx, register value supposed will remain after reset
	NICIssueReset(pAd);
*/
#endif /* RTMP_MAC_PCI // */

	/* Free IRQ */
	if (RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_INTERRUPT_IN_USE)) {
#ifdef RTMP_MAC_PCI
		/* Deregister interrupt function */
		RtmpOSIRQRelease(net_dev);
#endif /* RTMP_MAC_PCI // */
		RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_INTERRUPT_IN_USE);
	}

	/* Free Ring or USB buffers */
	RTMPFreeTxRxRingMemory(pAd);

	RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_HALT_IN_PROGRESS);

	/* Free BA reorder resource */
	ba_reordering_resource_release(pAd);

	RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_START_UP);

	/*+++Modify by woody to solve the bulk fail+++*/
	{
	}

	DBGPRINT(RT_DEBUG_TRACE, ("<=== rt28xx_close\n"));
	return 0;		/* close ok */
}	/* End of rt28xx_close */
int hsi_proto_read(int ch, u32 *buffer, int count)
{
	DECLARE_WAITQUEUE(wait, current);
	u32 *data;
	unsigned int data_len = 0;
	struct protocol_queue *entry;
	int ret, recv_data = 0;

	/*if (count > MAX_HSI_IPC_BUFFER)
		count = MAX_HSI_IPC_BUFFER;

	data = kmalloc(count, GFP_ATOMIC);*/

	ret = if_hsi_read(ch, buffer, count);
	if (ret < 0) {
		pr_err("Can not submit read. READ Error\n");
		goto out2;
	}

	spin_lock_bh(&hsi_protocol_data[ch].lock);
	add_wait_queue(&hsi_protocol_data[ch].rx_wait, &wait);
	spin_unlock_bh(&hsi_protocol_data[ch].lock);

	for (;;) {
		data = NULL;
		data_len = 0;

		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_bh(&hsi_protocol_data[ch].lock);
		if (!list_empty(&hsi_protocol_data[ch].rx_queue)) {
			entry = list_entry(hsi_protocol_data[ch].rx_queue.next,
					   struct protocol_queue, list);
			data = entry->data;
			data_len = entry->count;
			list_del(&entry->list);
			kfree(entry);
		}
		spin_unlock_bh(&hsi_protocol_data[ch].lock);

		pr_debug("%s, data = 0x%p, data_len = %d\n",
			 __func__, data, data_len);

		if (data_len) {
			pr_debug("%s, RX finished, ch-> %d, length = %d\n",
				 __func__, ch, count);
			spin_lock_bh(&hsi_protocol_data[ch].lock);
			hsi_protocol_data[ch].poll_event &=
				~(POLLIN | POLLRDNORM);
			spin_unlock_bh(&hsi_protocol_data[ch].lock);
			if_hsi_poll(ch);
#if 0
			memcpy(buffer, data, count);
#endif
			recv_data += data_len;
#if 0
			buffer += data_len;

			if ((recv_data == count) ||
			    (recv_data >= MAX_HSI_IPC_BUFFER))
#endif
			break;
		} else if (signal_pending(current)) {
			pr_debug("%s, ERESTARTSYS\n", __func__);
			recv_data = -EAGAIN;
			if_hsi_cancel_read(ch);
			/* goto out; */
			break;
		}

		/*printk(KERN_DEBUG "%s, going to sleep...\n", __func__); */
		schedule();
		/*printk(KERN_DEBUG "%s, woke up\n", __func__); */
	}

	/* Assumed tail (the original snippet is truncated here): restore
	 * the task state, detach the waiter and hand back the result. */
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&hsi_protocol_data[ch].rx_wait, &wait);
	return recv_data;

out2:
	return ret;
}
/*
========================================================================
Routine Description:
	Close raxx interface.

Arguments:
	*net_dev			the raxx interface pointer

Return Value:
	0					Open OK
	otherwise			Open Fail

Note:
	1. if open fail, kernel will not call the close function.
	2. Free memory for
		(1) Mlme Memory Handler:	MlmeHalt()
		(2) TX & RX:			RTMPFreeTxRxRingMemory()
		(3) BA Reordering:		ba_reordering_resource_release()
========================================================================
*/
int rt28xx_close(IN PNET_DEV dev)
{
	struct net_device *net_dev = (struct net_device *)dev;
	RTMP_ADAPTER *pAd = RTMP_OS_NETDEV_GET_PRIV(net_dev);
	BOOLEAN Cancelled;
	UINT32 i = 0;

#ifdef RTMP_MAC_USB
	DECLARE_WAIT_QUEUE_HEAD(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	//RTMP_SET_FLAG(pAd, fRTMP_ADAPTER_REMOVE_IN_PROGRESS);
#endif // RTMP_MAC_USB //

	DBGPRINT(RT_DEBUG_TRACE, ("===> rt28xx_close\n"));

	Cancelled = FALSE;
	// Sanity check for pAd
	if (pAd == NULL)
		return 0; // close ok

#ifdef WDS_SUPPORT
	WdsDown(pAd);
#endif // WDS_SUPPORT //

#ifdef CONFIG_STA_SUPPORT
	IF_DEV_CONFIG_OPMODE_ON_STA(pAd)
	{
		// If the driver doesn't wake up firmware here,
		// NICLoadFirmware will hang forever when the interface is up again.
		if (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_DOZE))
		{
			AsicForceWakeup(pAd, TRUE);
		}

#ifdef RTMP_MAC_USB
		RTMP_SET_FLAG(pAd, fRTMP_ADAPTER_REMOVE_IN_PROGRESS);
#endif // RTMP_MAC_USB //

		MlmeRadioOff(pAd);
	}
#endif // CONFIG_STA_SUPPORT //

	RTMP_SET_FLAG(pAd, fRTMP_ADAPTER_HALT_IN_PROGRESS);

	for (i = 0 ; i < NUM_OF_TX_RING; i++)
	{
		while (pAd->DeQueueRunning[i] == TRUE)
		{
			DBGPRINT(RT_DEBUG_TRACE,
				 ("Waiting for TxQueue[%d] done..........\n",
				  i));
			RTMPusecDelay(1000);
		}
	}

#ifdef RTMP_MAC_USB
	// ensure there are no more active urbs.
	add_wait_queue(&unlink_wakeup, &wait);
	pAd->wait = &unlink_wakeup;

	// maybe wait for deletions to finish.
	i = 0;
	//while((i < 25) && atomic_read(&pAd->PendingRx) > 0)
	while (i < 25)
	{
		unsigned long IrqFlags;

		RTMP_IRQ_LOCK(&pAd->BulkInLock, IrqFlags);
		if (pAd->PendingRx == 0)
		{
			RTMP_IRQ_UNLOCK(&pAd->BulkInLock, IrqFlags);
			break;
		}
		RTMP_IRQ_UNLOCK(&pAd->BulkInLock, IrqFlags);

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
		msleep(UNLINK_TIMEOUT_MS);		//Time in millisecond
#else
		RTMPusecDelay(UNLINK_TIMEOUT_MS*1000);	//Time in microsecond
#endif
		i++;
	}
	pAd->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
#endif // RTMP_MAC_USB //

	// Stop Mlme state machine
	MlmeHalt(pAd);

	// Close net tasklets
	RtmpNetTaskExit(pAd);

#ifdef CONFIG_STA_SUPPORT
	IF_DEV_CONFIG_OPMODE_ON_STA(pAd)
	{
		MacTableReset(pAd);
	}
#endif // CONFIG_STA_SUPPORT //

	MeasureReqTabExit(pAd);
	TpcReqTabExit(pAd);

	// Close kernel threads
	RtmpMgmtTaskExit(pAd);

	// Free IRQ
	if (RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_INTERRUPT_IN_USE))
	{
		RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_INTERRUPT_IN_USE);
	}

	// Free Ring or USB buffers
	RTMPFreeTxRxRingMemory(pAd);

	RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_HALT_IN_PROGRESS);

#ifdef DOT11_N_SUPPORT
	// Free BA reorder resource
	ba_reordering_resource_release(pAd);
#endif // DOT11_N_SUPPORT //

#ifdef CONFIG_STA_SUPPORT
#endif // CONFIG_STA_SUPPORT //

	RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_START_UP);

	/*+++Modify by woody to solve the bulk fail+++*/
#ifdef CONFIG_STA_SUPPORT
	IF_DEV_CONFIG_OPMODE_ON_STA(pAd)
	{
	}
#endif // CONFIG_STA_SUPPORT //

	DBGPRINT(RT_DEBUG_TRACE, ("<=== rt28xx_close\n"));
	return 0; // close ok
} /* End of rt28xx_close */
/*
 * AFS Cache Manager kernel thread
 */
static int kafscmd(void *arg)
{
	DECLARE_WAITQUEUE(myself, current);
	struct rxrpc_call *call;
	_SRXAFSCM_xxxx_t func;
	int die;

	printk("kAFS: Started kafscmd %d\n", current->pid);

	daemonize("kafscmd");

	complete(&kafscmd_alive);

	/* loop around looking for things to attend to */
	do {
		if (list_empty(&kafscmd_attention_list)) {
			set_current_state(TASK_INTERRUPTIBLE);
			add_wait_queue(&kafscmd_sleepq, &myself);

			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (!list_empty(&kafscmd_attention_list) ||
				    signal_pending(current) ||
				    kafscmd_die)
					break;

				schedule();
			}

			remove_wait_queue(&kafscmd_sleepq, &myself);
			set_current_state(TASK_RUNNING);
		}

		die = kafscmd_die;

		/* dequeue the next call requiring attention */
		call = NULL;
		spin_lock(&kafscmd_attention_lock);

		if (!list_empty(&kafscmd_attention_list)) {
			call = list_entry(kafscmd_attention_list.next,
					  struct rxrpc_call,
					  app_attn_link);
			list_del_init(&call->app_attn_link);
			die = 0;
		}

		spin_unlock(&kafscmd_attention_lock);

		if (call) {
			/* act upon it */
			_debug("@@@ Begin Attend Call %p", call);

			func = call->app_user;
			if (func)
				func(call);

			rxrpc_put_call(call);

			_debug("@@@ End Attend Call %p", call);
		}

	} while (!die);

	/* the thread exits once kafscmd_die is set (assumed tail; the
	 * original snippet is truncated at the loop above) */
	return 0;
}
int gs_block_til_ready(void *port_, struct file *filp)
{
	struct gs_port *gp = port_;
	struct tty_port *port = &gp->port;
	DECLARE_WAITQUEUE(wait, current);
	int retval;
	int do_clocal = 0;
	int CD;
	struct tty_struct *tty;
	unsigned long flags;

	func_enter ();

	if (!port) return 0;

	tty = port->tty;

	gs_dprintk (GS_DEBUG_BTR, "Entering gs_block_till_ready.\n");
	/*
	 * If the device is in the middle of being closed, then block
	 * until it's done, and then try again.
	 */
	if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) {
		interruptible_sleep_on(&port->close_wait);
		if (port->flags & ASYNC_HUP_NOTIFY)
			return -EAGAIN;
		else
			return -ERESTARTSYS;
	}

	gs_dprintk (GS_DEBUG_BTR, "after hung up\n");

	/*
	 * If non-blocking mode is set, or the port is not enabled,
	 * then make the check up front and then exit.
	 */
	if ((filp->f_flags & O_NONBLOCK) ||
	    (tty->flags & (1 << TTY_IO_ERROR))) {
		port->flags |= ASYNC_NORMAL_ACTIVE;
		return 0;
	}

	gs_dprintk (GS_DEBUG_BTR, "after nonblock\n");

	if (C_CLOCAL(tty))
		do_clocal = 1;

	/*
	 * Block waiting for the carrier detect and the line to become
	 * free (i.e., not in use by the callout).  While we are in
	 * this loop, port->count is dropped by one, so that
	 * rs_close() knows when to free things.  We restore it upon
	 * exit, either normal or abnormal.
	 */
	retval = 0;

	add_wait_queue(&port->open_wait, &wait);

	gs_dprintk (GS_DEBUG_BTR, "after add waitq.\n");
	spin_lock_irqsave(&port->lock, flags);
	if (!tty_hung_up_p(filp)) {
		port->count--;
	}
	port->blocked_open++;
	spin_unlock_irqrestore(&port->lock, flags);

	while (1) {
		CD = tty_port_carrier_raised(port);
		gs_dprintk (GS_DEBUG_BTR, "CD is now %d.\n", CD);
		set_current_state (TASK_INTERRUPTIBLE);
		if (tty_hung_up_p(filp) ||
		    !(port->flags & ASYNC_INITIALIZED)) {
			if (port->flags & ASYNC_HUP_NOTIFY)
				retval = -EAGAIN;
			else
				retval = -ERESTARTSYS;
			break;
		}
		if (!(port->flags & ASYNC_CLOSING) &&
		    (do_clocal || CD))
			break;
		gs_dprintk (GS_DEBUG_BTR, "signal_pending is now: %d (%lx)\n",
			    (int)signal_pending (current),
			    *(long*)(&current->blocked));
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	gs_dprintk (GS_DEBUG_BTR, "Got out of the loop. (%d)\n",
		    port->blocked_open);
	set_current_state (TASK_RUNNING);
	remove_wait_queue(&port->open_wait, &wait);

	spin_lock_irqsave(&port->lock, flags);
	if (!tty_hung_up_p(filp)) {
		port->count++;
	}
	port->blocked_open--;
	if (retval == 0)
		port->flags |= ASYNC_NORMAL_ACTIVE;
	spin_unlock_irqrestore(&port->lock, flags);
	func_exit ();
	return retval;
}
/*
 * fetch file status from the volume
 * - don't issue a fetch if:
 *   - the changed bit is not set and there's a valid callback
 *   - there are any outstanding ops that will fetch the status
 * - TODO implement local caching
 */
int afs_vnode_fetch_status(struct afs_vnode *vnode,
			   struct afs_vnode *auth_vnode, struct key *key)
{
	struct afs_server *server;
	unsigned long acl_order;
	int ret;

	DECLARE_WAITQUEUE(myself, current);

	_enter("%s,{%x:%u.%u}",
	       vnode->volume->vlocation->vldb.name,
	       vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);

	if (!test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) &&
	    vnode->cb_promised) {
		_leave(" [unchanged]");
		return 0;
	}

	if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
		_leave(" [deleted]");
		return -ENOENT;
	}

	acl_order = 0;
	if (auth_vnode)
		acl_order = auth_vnode->acl_order;

	spin_lock(&vnode->lock);

	if (!test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) &&
	    vnode->cb_promised) {
		spin_unlock(&vnode->lock);
		_leave(" [unchanged]");
		return 0;
	}

	ASSERTCMP(vnode->update_cnt, >=, 0);

	if (vnode->update_cnt > 0) {
		/* someone else started a fetch */
		_debug("wait on fetch %d", vnode->update_cnt);

		set_current_state(TASK_UNINTERRUPTIBLE);
		ASSERT(myself.func != NULL);
		add_wait_queue(&vnode->update_waitq, &myself);

		/* wait for the status to be updated */
		for (;;) {
			if (!test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags))
				break;
			if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
				break;

			/* check to see if it got updated and invalidated all
			 * before we saw it */
			if (vnode->update_cnt == 0) {
				remove_wait_queue(&vnode->update_waitq,
						  &myself);
				set_current_state(TASK_RUNNING);
				goto get_anyway;
			}

			spin_unlock(&vnode->lock);

			schedule();
			set_current_state(TASK_UNINTERRUPTIBLE);

			spin_lock(&vnode->lock);
		}

		remove_wait_queue(&vnode->update_waitq, &myself);
		spin_unlock(&vnode->lock);
		set_current_state(TASK_RUNNING);

		return test_bit(AFS_VNODE_DELETED, &vnode->flags) ?
			-ENOENT : 0;
	}

get_anyway:
	/* okay... we're going to have to initiate the op */
	vnode->update_cnt++;

	spin_unlock(&vnode->lock);

	/* merge AFS status fetches and clear outstanding callback on this
	 * vnode */
	do {
		/* pick a server to query */
		server = afs_volume_pick_fileserver(vnode);
		if (IS_ERR(server))
			goto no_server;

		_debug("USING SERVER: %p{%08x}",
		       server, ntohl(server->addr.s_addr));

		ret = afs_fs_fetch_file_status(server, key, vnode, NULL,
					       &afs_sync_call);

	} while (!afs_volume_release_fileserver(vnode, server, ret));

	/* adjust the flags */
	if (ret == 0) {
		_debug("adjust");
		if (auth_vnode)
			afs_cache_permit(vnode, key, acl_order);
		afs_vnode_finalise_status_update(vnode, server);
		afs_put_server(server);
	} else {
		_debug("failed [%d]", ret);
		afs_vnode_status_update_failed(vnode, ret);
	}

	ASSERTCMP(vnode->update_cnt, >=, 0);

	_leave(" = %d [cnt %d]", ret, vnode->update_cnt);
	return ret;

no_server:
	spin_lock(&vnode->lock);
	vnode->update_cnt--;
	ASSERTCMP(vnode->update_cnt, >=, 0);
	spin_unlock(&vnode->lock);
	_leave(" = %ld [cnt %d]", PTR_ERR(server), vnode->update_cnt);
	return PTR_ERR(server);
}
/* n_hdlc_tty_write()
 *
 *	write a single frame of data to device
 *
 * Arguments:	tty	pointer to associated tty device instance data
 *		file	pointer to file object data
 *		data	pointer to transmit data (one frame)
 *		count	size of transmit frame in bytes
 *
 * Return Value:	number of bytes written (or error code)
 */
static rw_ret_t n_hdlc_tty_write (struct tty_struct *tty, struct file *file,
	const __u8 *data, rw_count_t count)
{
	struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
	int error = 0;
	DECLARE_WAITQUEUE(wait, current);
	N_HDLC_BUF *tbuf;

	if (debuglevel >= DEBUG_LEVEL_INFO)
		printk("%s(%d)n_hdlc_tty_write() called count=%d\n",
			__FILE__,__LINE__,count);

	/* Verify pointers */
	if (!n_hdlc)
		return -EIO;

	if (n_hdlc->magic != HDLC_MAGIC)
		return -EIO;

	/* verify frame size */
	if (count > maxframe ) {
		if (debuglevel & DEBUG_LEVEL_INFO)
			printk (KERN_WARNING
				"n_hdlc_tty_write: truncating user packet "
				"from %lu to %d\n", (unsigned long) count,
				maxframe );
		count = maxframe;
	}

	add_wait_queue(&n_hdlc->write_wait, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	/* Allocate transmit buffer */
	/* sleep until transmit buffer available */
	while (!(tbuf = n_hdlc_buf_get(&n_hdlc->tx_free_buf_list))) {
		schedule();

		n_hdlc = tty2n_hdlc (tty);
		if (!n_hdlc || n_hdlc->magic != HDLC_MAGIC ||
		    tty != n_hdlc->tty) {
			printk("n_hdlc_tty_write: %p invalid after wait!\n",
				n_hdlc);
			error = -EIO;
			break;
		}

		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Re-arm the sleep state before re-checking the free list;
		 * without this the schedule() above returns immediately on
		 * every later iteration and the loop busy-spins. */
		set_current_state(TASK_INTERRUPTIBLE);
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&n_hdlc->write_wait, &wait);

	if (!error) {
		/* Retrieve the user's buffer */
		COPY_FROM_USER (error, tbuf->buf, data, count);
		if (error) {
			/* return tx buffer to free list */
			n_hdlc_buf_put(&n_hdlc->tx_free_buf_list,tbuf);
		} else {
			/* Send the data */
			tbuf->count = error = count;
			n_hdlc_buf_put(&n_hdlc->tx_buf_list,tbuf);
			n_hdlc_send_frames(n_hdlc,tty);
		}
	}

	return error;

}	/* end of n_hdlc_tty_write() */
/* queue worker thread */
static int cyasblkdev_queue_thread(void *d)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cyasblkdev_queue *bq = d;
	struct request_queue *q = bq->queue;
	u32 qth_pid;

	DBGPRN_FUNC_NAME;

	/*
	 * set iothread to ensure that we aren't put to sleep by
	 * the process freezing.  we handle suspension ourselves.
	 */
	daemonize("cyasblkdev_queue_thread");

	/* signal to queue_init() so it could continue */
	complete(&bq->thread_complete);

	down(&bq->thread_sem);
	add_wait_queue(&bq->thread_wq, &wait);

	qth_pid = current->pid;

#ifndef WESTBRIDGE_NDEBUG
	cy_as_hal_print_message(
		"%s:%x started, bq:%p, q:%p\n", __func__, qth_pid, bq, q);
#endif

	do {
		struct request *req = NULL;

		/* the thread wants to be woken up by signals as well */
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irq(q->queue_lock);

#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message(
			"%s: for bq->queue is null\n", __func__);
#endif

		if (!bq->req) {
			/* chk if queue is plugged */
			if (!blk_queue_plugged(q)) {
				bq->req = req = blk_fetch_request(q);
#ifndef WESTBRIDGE_NDEBUG
				cy_as_hal_print_message(
					"%s: blk_fetch_request:%x\n",
					__func__, (uint32_t)req);
#endif
			} else {
#ifndef WESTBRIDGE_NDEBUG
				cy_as_hal_print_message(
					"%s: queue plugged, "
					"skip blk_fetch()\n", __func__);
#endif
			}
		}
		spin_unlock_irq(q->queue_lock);

#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message(
			"%s: checking if request queue is null\n", __func__);
#endif

		if (!req) {
			if (bq->flags & CYASBLKDEV_QUEUE_EXIT) {
#ifndef WESTBRIDGE_NDEBUG
				cy_as_hal_print_message(
					"%s:got QUEUE_EXIT flag\n", __func__);
#endif
				break;
			}

#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message(
				"%s: request queue is null, goto sleep, "
				"thread_sem->count=%d\n",
				__func__, bq->thread_sem.count);
			if (spin_is_locked(q->queue_lock)) {
				cy_as_hal_print_message("%s: queue_lock "
					"is locked, need to release\n",
					__func__);
				spin_unlock(q->queue_lock);

				if (spin_is_locked(q->queue_lock))
					cy_as_hal_print_message(
						"%s: unlock did not work\n",
						__func__);
			} else {
				cy_as_hal_print_message(
					"%s: checked lock, is not locked\n",
					__func__);
			}
#endif

			up(&bq->thread_sem);

			/* yields to the next rdytorun proc,
			 * then goes back to sleep*/
			schedule();
			down(&bq->thread_sem);

#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message(
				"%s: wake_up,continue\n", __func__);
#endif
			continue;
		}

		/* new req received, issue it to the driver */
		set_current_state(TASK_RUNNING);

#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message(
			"%s: issued a RQ:%x\n", __func__, (uint32_t)req);
#endif

		bq->issue_fn(bq, req);

#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message(
			"%s: bq->issue_fn() returned\n", __func__);
#endif

	} while (1);

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&bq->thread_wq, &wait);
	up(&bq->thread_sem);

	complete_and_exit(&bq->thread_complete, 0);

	/* unreachable: complete_and_exit() does not return */
#ifndef WESTBRIDGE_NDEBUG
	cy_as_hal_print_message("%s: is finished\n", __func__);
#endif

	return 0;
}
/* * "read" file op */ static ssize_t hiddev_read(struct file * file, char __user * buffer, size_t count, loff_t *ppos) { DECLARE_WAITQUEUE(wait, current); struct hiddev_list *list = file->private_data; int event_size; int retval = 0; event_size = ((list->flags & HIDDEV_FLAG_UREF) != 0) ? sizeof(struct hiddev_usage_ref) : sizeof(struct hiddev_event); if (count < event_size) return 0; while (retval == 0) { if (list->head == list->tail) { add_wait_queue(&list->hiddev->wait, &wait); set_current_state(TASK_INTERRUPTIBLE); while (list->head == list->tail) { if (file->f_flags & O_NONBLOCK) { retval = -EAGAIN; break; } if (signal_pending(current)) { retval = -ERESTARTSYS; break; } if (!list->hiddev->exist) { retval = -EIO; break; } schedule(); } set_current_state(TASK_RUNNING); remove_wait_queue(&list->hiddev->wait, &wait); } if (retval) return retval; while (list->head != list->tail && retval + event_size <= count) { if ((list->flags & HIDDEV_FLAG_UREF) == 0) { if (list->buffer[list->tail].field_index != HID_FIELD_INDEX_NONE) { struct hiddev_event event; event.hid = list->buffer[list->tail].usage_code; event.value = list->buffer[list->tail].value; if (copy_to_user(buffer + retval, &event, sizeof(struct hiddev_event))) return -EFAULT; retval += sizeof(struct hiddev_event); } } else { if (list->buffer[list->tail].field_index != HID_FIELD_INDEX_NONE || (list->flags & HIDDEV_FLAG_REPORT) != 0) { if (copy_to_user(buffer + retval, list->buffer + list->tail, sizeof(struct hiddev_usage_ref))) return -EFAULT; retval += sizeof(struct hiddev_usage_ref); } } list->tail = (list->tail + 1) & (HIDDEV_BUFFER_SIZE - 1); } } return retval; }
static ssize_t adu_write(struct file *file, const __user char *buffer,
			 size_t count, loff_t *ppos)
{
	DECLARE_WAITQUEUE(waita, current);
	struct adu_device *dev;
	size_t bytes_written = 0;
	size_t bytes_to_write;
	size_t buffer_size;
	unsigned long flags;
	int retval;

	dbg(2," %s : enter, count = %Zd", __func__, count);

	dev = file->private_data;

	retval = mutex_lock_interruptible(&dev->mtx);
	if (retval)
		goto exit_nolock;

	if (dev->udev == NULL) {
		retval = -ENODEV;
		printk(KERN_ERR "adutux: No device or device unplugged %d\n",
		       retval);
		goto exit;
	}

	if (count == 0) {
		dbg(1," %s : write request of 0 bytes", __func__);
		goto exit;
	}

	while (count > 0) {
		add_wait_queue(&dev->write_wait, &waita);
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&dev->buflock, flags);
		if (!dev->out_urb_finished) {
			spin_unlock_irqrestore(&dev->buflock, flags);

			mutex_unlock(&dev->mtx);
			if (signal_pending(current)) {
				dbg(1," %s : interrupted", __func__);
				set_current_state(TASK_RUNNING);
				retval = -EINTR;
				goto exit_onqueue;
			}
			if (schedule_timeout(COMMAND_TIMEOUT) == 0) {
				dbg(1, "%s - command timed out.", __func__);
				retval = -ETIMEDOUT;
				goto exit_onqueue;
			}
			remove_wait_queue(&dev->write_wait, &waita);
			retval = mutex_lock_interruptible(&dev->mtx);
			if (retval) {
				retval = bytes_written ? bytes_written : retval;
				goto exit_nolock;
			}

			dbg(4," %s : in progress, count = %Zd", __func__, count);
		} else {
			spin_unlock_irqrestore(&dev->buflock, flags);
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&dev->write_wait, &waita);
			dbg(4," %s : sending, count = %Zd", __func__, count);

			buffer_size = le16_to_cpu(dev->interrupt_out_endpoint->wMaxPacketSize);
			bytes_to_write = count > buffer_size ? buffer_size : count;
			dbg(4," %s : buffer_size = %Zd, count = %Zd, bytes_to_write = %Zd",
			    __func__, buffer_size, count, bytes_to_write);

			if (copy_from_user(dev->interrupt_out_buffer, buffer,
					   bytes_to_write) != 0) {
				retval = -EFAULT;
				goto exit;
			}

			usb_fill_int_urb(
				dev->interrupt_out_urb,
				dev->udev,
				usb_sndintpipe(dev->udev,
					       dev->interrupt_out_endpoint->bEndpointAddress),
				dev->interrupt_out_buffer,
				bytes_to_write,
				adu_interrupt_out_callback,
				dev,
				dev->interrupt_out_endpoint->bInterval);
			dev->interrupt_out_urb->actual_length = bytes_to_write;
			dev->out_urb_finished = 0;
			retval = usb_submit_urb(dev->interrupt_out_urb, GFP_KERNEL);
			if (retval < 0) {
				dev->out_urb_finished = 1;
				dev_err(&dev->udev->dev, "Couldn't submit "
					"interrupt_out_urb %d\n", retval);
				goto exit;
			}

			buffer += bytes_to_write;
			count -= bytes_to_write;

			bytes_written += bytes_to_write;
		}
	}
	mutex_unlock(&dev->mtx);
	return bytes_written;

exit:
	mutex_unlock(&dev->mtx);
exit_nolock:
	dbg(2," %s : leave, return value %d", __func__, retval);
	return retval;

exit_onqueue:
	remove_wait_queue(&dev->write_wait, &waita);
	return retval;
}
/*
 * ------------------------------------------------------------
 * mcfrs_open() and friends
 * ------------------------------------------------------------
 */
static int block_til_ready(struct tty_struct *tty, struct file * filp,
			   struct mcf_serial *info)
{
	DECLARE_WAITQUEUE(wait, current);
	int	retval;
	int	do_clocal = 0;

	/*
	 * If the device is in the middle of being closed, then block
	 * until it's done, and then try again.
	 */
	if (info->flags & ASYNC_CLOSING) {
		interruptible_sleep_on(&info->close_wait);
#ifdef SERIAL_DO_RESTART
		if (info->flags & ASYNC_HUP_NOTIFY)
			return -EAGAIN;
		else
			return -ERESTARTSYS;
#else
		return -EAGAIN;
#endif
	}

	/*
	 * If non-blocking mode is set, or the port is not enabled,
	 * then make the check up front and then exit.
	 */
	if ((filp->f_flags & O_NONBLOCK) ||
	    (tty->flags & (1 << TTY_IO_ERROR))) {
		info->flags |= ASYNC_NORMAL_ACTIVE;
		return 0;
	}

	if (tty->termios->c_cflag & CLOCAL)
		do_clocal = 1;

	/*
	 * Block waiting for the carrier detect and the line to become
	 * free (i.e., not in use by the callout).  While we are in
	 * this loop, info->count is dropped by one, so that
	 * mcfrs_close() knows when to free things.  We restore it upon
	 * exit, either normal or abnormal.
	 */
	retval = 0;
	add_wait_queue(&info->open_wait, &wait);
#ifdef SERIAL_DEBUG_OPEN
	printk("block_til_ready before block: ttyS%d, count = %d\n",
	       info->line, info->count);
#endif
	info->count--;
	info->blocked_open++;
	while (1) {
		local_irq_disable();
		mcfrs_setsignals(info, 1, 1);
		local_irq_enable();
		current->state = TASK_INTERRUPTIBLE;
		if (tty_hung_up_p(filp) ||
		    !(info->flags & ASYNC_INITIALIZED)) {
#ifdef SERIAL_DO_RESTART
			if (info->flags & ASYNC_HUP_NOTIFY)
				retval = -EAGAIN;
			else
				retval = -ERESTARTSYS;
#else
			retval = -EAGAIN;
#endif
			break;
		}
		if (!(info->flags & ASYNC_CLOSING) &&
		    (do_clocal || (mcfrs_getsignals(info) & TIOCM_CD)))
			break;
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
#ifdef SERIAL_DEBUG_OPEN
		printk("block_til_ready blocking: ttyS%d, count = %d\n",
		       info->line, info->count);
#endif
		schedule();
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&info->open_wait, &wait);
	if (!tty_hung_up_p(filp))
		info->count++;
	info->blocked_open--;
#ifdef SERIAL_DEBUG_OPEN
	printk("block_til_ready after blocking: ttyS%d, count = %d\n",
	       info->line, info->count);
#endif
	if (retval)
		return retval;
	info->flags |= ASYNC_NORMAL_ACTIVE;
	return 0;
}
#if 0	/* reconstructed guard: this helper is compiled out (note the
	   dangling #endif below and the XXX placeholder in the DAC path) */
static void ad1889_update_ptr(ad1889_dev_t *dev, int wake)
{
	ad1889_state_t *state;
	struct dmabuf *dmabuf;
	unsigned long hwptr;
	int diff;

	/* check ADC first */
	state = &dev->adc_state;
	dmabuf = &state->dmabuf;
	if (dmabuf->enable & ADC_RUNNING) {
		hwptr = ad1889_get_dma_addr(state);
		diff = (dmabuf->dmasize + hwptr - dmabuf->hwptr) %
			dmabuf->dmasize;
		dmabuf->hwptr = hwptr;
		dmabuf->total_bytes += diff;
		dmabuf->count += diff;
		if (dmabuf->count > dmabuf->dmasize)
			dmabuf->count = dmabuf->dmasize;

		if (dmabuf->mapped) {
			if (wake && dmabuf->count >= dmabuf->fragsize)
				wake_up(&dmabuf->wait);
		} else {
			if (wake && dmabuf->count > 0)
				wake_up(&dmabuf->wait);
		}
	}

	/* check DAC */
	state = &dev->wav_state;
	dmabuf = &state->dmabuf;
	if (dmabuf->enable & DAC_RUNNING) {
		XXX
	}
}
#endif

/************************* /dev/dsp interfaces ************************* */

static ssize_t ad1889_read(struct file *file, char __user *buffer,
			   size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t ad1889_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	ad1889_dev_t *dev = (ad1889_dev_t *)file->private_data;
	ad1889_state_t *state = &dev->state[AD_WAV_STATE];
	volatile struct dmabuf *dmabuf = &state->dmabuf;
	ssize_t ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	mutex_lock(&state->mutex);
#if 0
	if (dmabuf->mapped) {
		ret = -ENXIO;
		goto err1;
	}
#endif
	if (!access_ok(VERIFY_READ, buffer, count)) {
		ret = -EFAULT;
		goto err1;
	}

	add_wait_queue(&state->dmabuf.wait, &wait);

	/* start filling dma buffer.... */
	while (count > 0) {
		long rem;
		long cnt = count;
		unsigned long flags;

		for (;;) {
			long used_bytes;
			long timeout;	/* max time for DMA in jiffies */

			/* buffer is full if wr catches up to rd */
			spin_lock_irqsave(&state->card->lock, flags);
			used_bytes = dmabuf->wr_ptr - dmabuf->rd_ptr;
			timeout = (dmabuf->dma_len * HZ) / dmabuf->rate;
			spin_unlock_irqrestore(&state->card->lock, flags);

			/* adjust for buffer wrap around */
			used_bytes = (used_bytes + DMA_SIZE) & (DMA_SIZE - 1);

			/* If at least one page unused */
			if (used_bytes < (DMA_SIZE - 0x1000))
				break;

			/* dma buffer full */
			if (file->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				goto err2;
			}

			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(timeout + 1);
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				goto err2;
			}
		}

		/* watch out for wrapping around static buffer */
		spin_lock_irqsave(&state->card->lock, flags);
		rem = DMA_SIZE - dmabuf->wr_ptr;
		if (cnt > rem)
			cnt = rem;

		rem = dmabuf->wr_ptr;

		/* update dma pointers */
		dmabuf->wr_ptr += cnt;
		dmabuf->wr_ptr &= DMA_SIZE - 1;	/* wrap ptr if necessary */
		spin_unlock_irqrestore(&state->card->lock, flags);

		/* transfer unwrapped chunk */
		if (copy_from_user(dmabuf->rawbuf + rem, buffer, cnt)) {
			ret = -EFAULT;
			goto err2;
		}

		DBG("Writing 0x%lx bytes to +0x%lx\n", cnt, rem);

		/* update counters */
		count -= cnt;
		buffer += cnt;
		ret += cnt;

		/* we have something to play - go play it! */
		ad1889_trigger_playback(dev);
	}

err2:
	remove_wait_queue(&state->dmabuf.wait, &wait);
err1:
	mutex_unlock(&state->mutex);
	return ret;
}
int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock)
{
	struct sock *sk;
	int len = skb->len;
	int protocol = ssk->protocol;
	long timeo;
	DECLARE_WAITQUEUE(wait, current);

	timeo = sock_sndtimeo(ssk, nonblock);

retry:
	sk = netlink_lookup(protocol, pid);
	if (sk == NULL)
		goto no_dst;

	/* Don't bother queuing skb if kernel socket has no input function */
	if (sk->protinfo.af_netlink->pid == 0 &&
	    !sk->protinfo.af_netlink->data_ready)
		goto no_dst;

#ifdef NL_EMULATE_DEV
	if (sk->protinfo.af_netlink->handler) {
		skb_orphan(skb);
		len = sk->protinfo.af_netlink->handler(protocol, skb);
		sock_put(sk);
		return len;
	}
#endif

	if (atomic_read(&sk->rmem_alloc) > sk->rcvbuf ||
	    test_bit(0, &sk->protinfo.af_netlink->state)) {
		if (!timeo) {
			if (ssk->protinfo.af_netlink->pid == 0)
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&sk->protinfo.af_netlink->wait, &wait);

		if ((atomic_read(&sk->rmem_alloc) > sk->rcvbuf ||
		     test_bit(0, &sk->protinfo.af_netlink->state)) &&
		    !sk->dead)
			timeo = schedule_timeout(timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&sk->protinfo.af_netlink->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(timeo);
		}
		goto retry;
	}

	skb_orphan(skb);
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->receive_queue, skb);
	sk->data_ready(sk, len);
	sock_put(sk);
	return len;

no_dst:
	kfree_skb(skb);
	return -ECONNREFUSED;
}
static int usb_rtusb_close_device(IN PRTMP_ADAPTER pAd)
{
	int i = 0;
	int ret;
	DECLARE_WAIT_QUEUE_HEAD(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);

	DBGPRINT(RT_DEBUG_TRACE, "-->usb_rtusb_close_device \n");

	// ensure there are no more active urbs.
	add_wait_queue(&unlink_wakeup, &wait);
	pAd->wait = &unlink_wakeup;

	// maybe wait for deletions to finish.
	while ((i < 25) && atomic_read(&pAd->PendingRx) > 0)
	{
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
		msleep(UNLINK_TIMEOUT_MS);
#endif
		i++;
	}
	pAd->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);

	if (pAd->MLMEThr_pid >= 0)
	{
		mlme_kill = 1;
		RTUSBMlmeUp(pAd);
		wmb();	// need to check
		ret = KILL_THREAD_PID(pAd->MLMEThr_pid, SIGTERM, 1);
		if (ret)
		{
			printk(KERN_WARNING "%s: unable to kill Mlme thread\n",
			       pAd->net_dev->name);
		}
		wait_for_completion(&pAd->MlmeThreadNotify);
		// reset mlme thread
		pAd->MLMEThr_pid = THREAD_PID_INIT_VALUE;
	}

	if (pAd->RTUSBCmdThr_pid >= 0)
	{
		RTUSBCmd_kill = 1;
		RTUSBCMDUp(pAd);
		wmb();	// need to check
		ret = KILL_THREAD_PID(pAd->RTUSBCmdThr_pid, SIGTERM, 1);
		if (ret)
		{
			printk(KERN_WARNING "%s: unable to kill RTUSBCmd thread\n",
			       pAd->net_dev->name);
		}
		wait_for_completion(&pAd->CmdThreadNotify);
		// reset cmd thread
		pAd->RTUSBCmdThr_pid = THREAD_PID_INIT_VALUE;
	}

	RTUSBHalt(pAd, TRUE);
	RTMP_SET_FLAG(pAd, fRTMP_ADAPTER_REMOVE_IN_PROGRESS);

	tasklet_kill(&pAd->rx_bh);

	DBGPRINT(RT_DEBUG_TRACE, "<--usb_rtusb_close_device \n");

	return 0;
}
/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md)
{
	struct dm_table *map = NULL;
	DECLARE_WAITQUEUE(wait, current);
	int r = -EINVAL;

	down(&md->suspend_lock);

	if (dm_suspended(md))
		goto out;

	map = dm_get_table(md);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/* Flush I/O to the device. */
	r = lock_fs(md);
	if (r)
		goto out;

	/*
	 * First we set the BLOCK_IO flag so no more ios will be mapped.
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO, &md->flags);

	add_wait_queue(&md->wait, &wait);
	up_write(&md->io_lock);

	/* unplug */
	if (map)
		dm_table_unplug_all(map);

	/*
	 * Then we wait for the already mapped ios to
	 * complete.
	 */
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!atomic_read(&md->pending) || signal_pending(current))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	down_write(&md->io_lock);
	remove_wait_queue(&md->wait, &wait);

	/* were we interrupted ? */
	r = -EINTR;
	if (atomic_read(&md->pending)) {
		up_write(&md->io_lock);
		unlock_fs(md);
		clear_bit(DMF_BLOCK_IO, &md->flags);
		goto out;
	}
	up_write(&md->io_lock);

	dm_table_postsuspend_targets(map);

	set_bit(DMF_SUSPENDED, &md->flags);

	r = 0;

out:
	dm_table_put(map);
	up(&md->suspend_lock);
	return r;
}
static void acpi_bus_osc_support(void)
{
	u32 capbuf[2];
	struct acpi_osc_context context = {
		.uuid_str = sb_uuid_str,
		.rev = 1,
		.cap.length = 8,
		.cap.pointer = capbuf,
	};
	acpi_handle handle;

	capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
	capbuf[OSC_SUPPORT_TYPE] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */
#if defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) ||\
			defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE)
	capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PAD_SUPPORT;
#endif

#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
	capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT;
#endif
	if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
		return;

	if (is_uv_system() && is_kdump_kernel()) {
		/*
		 * There is no need to parse the OS Capabilities table
		 * in the crash kernel.  And it should not be done, as
		 * that parsing includes destructive writes to io ports to
		 * initialize UV system controller interrupts.
		 */
		return;
	}

	if (ACPI_SUCCESS(acpi_run_osc(handle, &context)))
		kfree(context.ret.pointer);
	/* no need to check the returned capabilities here */
}

/* --------------------------------------------------------------------------
                                Event Management
   -------------------------------------------------------------------------- */

#ifdef CONFIG_ACPI_PROC_EVENT
static DEFINE_SPINLOCK(acpi_bus_event_lock);

LIST_HEAD(acpi_bus_event_list);
DECLARE_WAIT_QUEUE_HEAD(acpi_bus_event_queue);

extern int event_is_open;

int acpi_bus_generate_proc_event4(const char *device_class,
				  const char *bus_id, u8 type, int data)
{
	struct acpi_bus_event *event;
	unsigned long flags = 0;

	/* drop event on the floor if no one's listening */
	if (!event_is_open)
		return 0;

	event = kmalloc(sizeof(struct acpi_bus_event), GFP_ATOMIC);
	if (!event)
		return -ENOMEM;

	strcpy(event->device_class, device_class);
	strcpy(event->bus_id, bus_id);
	event->type = type;
	event->data = data;

	spin_lock_irqsave(&acpi_bus_event_lock, flags);
	list_add_tail(&event->node, &acpi_bus_event_list);
	spin_unlock_irqrestore(&acpi_bus_event_lock, flags);

	wake_up_interruptible(&acpi_bus_event_queue);

	return 0;
}

EXPORT_SYMBOL_GPL(acpi_bus_generate_proc_event4);

int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data)
{
	if (!device)
		return -EINVAL;
	return acpi_bus_generate_proc_event4(device->pnp.device_class,
					     device->pnp.bus_id, type, data);
}

EXPORT_SYMBOL(acpi_bus_generate_proc_event);

int acpi_bus_receive_event(struct acpi_bus_event *event)
{
	unsigned long flags = 0;
	struct acpi_bus_event *entry = NULL;

	DECLARE_WAITQUEUE(wait, current);

	if (!event)
		return -EINVAL;

	if (list_empty(&acpi_bus_event_list)) {
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&acpi_bus_event_queue, &wait);

		if (list_empty(&acpi_bus_event_list))
			schedule();

		remove_wait_queue(&acpi_bus_event_queue, &wait);
		set_current_state(TASK_RUNNING);

		if (signal_pending(current))
			return -ERESTARTSYS;
	}

	spin_lock_irqsave(&acpi_bus_event_lock, flags);
	if (!list_empty(&acpi_bus_event_list)) {
		entry = list_entry(acpi_bus_event_list.next,
				   struct acpi_bus_event, node);
		list_del(&entry->node);
	}
	spin_unlock_irqrestore(&acpi_bus_event_lock, flags);

	if (!entry)
		return -ENODEV;

	memcpy(event, entry, sizeof(struct acpi_bus_event));

	kfree(entry);

	return 0;
}
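/*
 * Illustrative consumer (not from the kernel source; the function name is
 * made up): this is roughly how a reader of the /proc/acpi/event interface
 * would drive acpi_bus_receive_event() above.  The call may sleep until
 * a producer queues an event and wakes acpi_bus_event_queue.
 */
static int example_drain_acpi_events(void)
{
	struct acpi_bus_event ev;
	int err;

	for (;;) {
		err = acpi_bus_receive_event(&ev);	/* blocks if empty */
		if (err)
			return err;	/* -ERESTARTSYS, -ENODEV, ... */
		printk(KERN_INFO "acpi event: %s %s %08x %08x\n",
		       ev.device_class, ev.bus_id, ev.type, ev.data);
	}
}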
static int axusbnet_stop(struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);
	struct driver_info *info = dev->driver_info;
	int temp;
	int retval;
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
#else
	DECLARE_WAIT_QUEUE_HEAD(unlink_wakeup);
#endif
	DECLARE_WAITQUEUE(wait, current);

	netif_stop_queue(net);

	if (netif_msg_ifdown(dev))
		devinfo(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld",
			dev->stats.rx_packets, dev->stats.tx_packets,
			dev->stats.rx_errors, dev->stats.tx_errors);

	/* allow minidriver to stop correctly (wireless devices to turn off
	 * radio etc) */
	if (info->stop) {
		retval = info->stop(dev);
		if (retval < 0 && netif_msg_ifdown(dev))
			devinfo(dev, "stop fail (%d) usbnet usb-%s-%s, %s",
				retval, dev->udev->bus->bus_name,
				dev->udev->devpath, info->description);
	}

	if (!(info->flags & FLAG_AVOID_UNLINK_URBS)) {
		/* ensure there are no more active urbs */
		add_wait_queue(&unlink_wakeup, &wait);
		dev->wait = &unlink_wakeup;
		temp = unlink_urbs(dev, &dev->txq) +
		       unlink_urbs(dev, &dev->rxq);

		/* maybe wait for deletions to finish. */
		while (!skb_queue_empty(&dev->rxq) &&
		       !skb_queue_empty(&dev->txq) &&
		       !skb_queue_empty(&dev->done)) {
			msleep(UNLINK_TIMEOUT_MS);
			if (netif_msg_ifdown(dev))
				devdbg(dev, "waited for %d urb completions",
				       temp);
		}
		dev->wait = NULL;
		remove_wait_queue(&unlink_wakeup, &wait);
	}

	usb_kill_urb(dev->interrupt);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	del_timer_sync(&dev->delay);
	tasklet_kill(&dev->bh);

	return 0;
}
int nvram_commit(void)
{
	char *buf;
	size_t erasesize, len, magic_len;
	unsigned int i;
	int ret;
	struct nvram_header *header;
	unsigned long flags;
	u_int32_t offset;
	DECLARE_WAITQUEUE(wait, current);
	wait_queue_head_t wait_q;
	struct erase_info erase;
	u_int32_t magic_offset = 0;	/* Offset for writing MAGIC # */

	if (!nvram_mtd) {
		printk("nvram_commit: NVRAM not found\n");
		return -ENODEV;
	}

	if (in_interrupt()) {
		printk("nvram_commit: not committing in interrupt\n");
		return -EINVAL;
	}

	/* Backup sector blocks to be erased */
	erasesize = ROUNDUP(NVRAM_SPACE, nvram_mtd->erasesize);
	if (!(buf = kmalloc(erasesize, GFP_KERNEL))) {
		printk("nvram_commit: out of memory\n");
		return -ENOMEM;
	}

	down(&nvram_sem);

	if ((i = erasesize - NVRAM_SPACE) > 0) {
		offset = nvram_mtd->size - erasesize;
		len = 0;
		ret = MTD_READ(nvram_mtd, offset, i, &len, buf);
		if (ret || len != i) {
			printk("nvram_commit: read error ret = %d, len = %d/%d\n",
			       ret, len, i);
			ret = -EIO;
			goto done;
		}
		header = (struct nvram_header *)(buf + i);
		magic_offset = i + ((void *)&header->magic - (void *)header);
	} else {
		offset = nvram_mtd->size - NVRAM_SPACE;
		magic_offset = ((void *)&header->magic - (void *)header);
		header = (struct nvram_header *)buf;
	}

	/* clear the existing magic # to mark the NVRAM as unusable;
	   we can pull MAGIC bits low without an erase */
	header->magic = NVRAM_CLEAR_MAGIC;	/* All zeros magic */

	/* Unlock sector blocks (for Intel 28F320C3B flash), 20060309 */
	if (nvram_mtd->unlock)
		nvram_mtd->unlock(nvram_mtd, offset, nvram_mtd->erasesize);

	ret = MTD_WRITE(nvram_mtd, offset + magic_offset,
			sizeof(header->magic), &magic_len,
			(char *)&header->magic);
	if (ret || magic_len != sizeof(header->magic)) {
		printk("nvram_commit: clear MAGIC error\n");
		ret = -EIO;
		goto done;
	}

	/* reset MAGIC before we regenerate the NVRAM,
	   otherwise we'll have an incorrect CRC */
	header->magic = NVRAM_MAGIC;

	/* Regenerate NVRAM */
	spin_lock_irqsave(&nvram_lock, flags);
	ret = _nvram_commit(header);
	spin_unlock_irqrestore(&nvram_lock, flags);
	if (ret)
		goto done;

	/* Erase sector blocks */
	init_waitqueue_head(&wait_q);
	for (; offset < nvram_mtd->size - NVRAM_SPACE + header->len;
	     offset += nvram_mtd->erasesize) {
		erase.mtd = nvram_mtd;
		erase.addr = offset;
		erase.len = nvram_mtd->erasesize;
		erase.callback = erase_callback;
		erase.priv = (u_long)&wait_q;

		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&wait_q, &wait);

		/* Unlock sector blocks */
		if (nvram_mtd->unlock)
			nvram_mtd->unlock(nvram_mtd, offset,
					  nvram_mtd->erasesize);

		if ((ret = MTD_ERASE(nvram_mtd, &erase))) {
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&wait_q, &wait);
			printk("nvram_commit: erase error\n");
			goto done;
		}

		/* Wait for erase to finish */
		schedule();
		remove_wait_queue(&wait_q, &wait);
	}

	/* Write partition up to end of data area */
	header->magic = NVRAM_INVALID_MAGIC;	/* All ones magic */
	offset = nvram_mtd->size - erasesize;
	i = erasesize - NVRAM_SPACE + header->len;
	ret = MTD_WRITE(nvram_mtd, offset, i, &len, buf);
	if (ret || len != i) {
		printk("nvram_commit: write error\n");
		ret = -EIO;
		goto done;
	}

	/* Now mark the NVRAM in flash as "valid" by setting the correct
	   MAGIC # */
	header->magic = NVRAM_MAGIC;
	ret = MTD_WRITE(nvram_mtd, offset + magic_offset,
			sizeof(header->magic), &magic_len,
			(char *)&header->magic);
	if (ret || magic_len != sizeof(header->magic)) {
		printk("nvram_commit: write MAGIC error\n");
		ret = -EIO;
		goto done;
	}

	/*
	 * Reading a few bytes back here will put the device
	 * back to the correct mode on certain flashes
	 */
	offset = nvram_mtd->size - erasesize;
	ret = MTD_READ(nvram_mtd, offset, 4, &len, buf);

done:
	up(&nvram_sem);
	kfree(buf);
	return ret;
}
static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
                        loff_t *ppos)
{
    struct adu_device *dev;
    size_t bytes_read = 0;
    size_t bytes_to_read = count;
    int i;
    int retval = 0;
    int timeout = 0;
    int should_submit = 0;
    unsigned long flags;
    DECLARE_WAITQUEUE(wait, current);

    dbg(2, " %s : enter, count = %Zd, file=%p", __func__, count, file);

    dev = file->private_data;
    dbg(2, " %s : dev=%p", __func__, dev);

    if (mutex_lock_interruptible(&dev->mtx))
        return -ERESTARTSYS;

    /* verify that the device wasn't unplugged */
    if (dev->udev == NULL) {
        retval = -ENODEV;
        printk(KERN_ERR "adutux: No device or device unplugged %d\n", retval);
        goto exit;
    }

    /* verify that some data was requested */
    if (count == 0) {
        dbg(1, " %s : read request of 0 bytes", __func__);
        goto exit;
    }

    timeout = COMMAND_TIMEOUT;
    dbg(2, " %s : about to start looping", __func__);
    while (bytes_to_read) {
        int data_in_secondary = dev->secondary_tail - dev->secondary_head;
        dbg(2, " %s : while, data_in_secondary=%d, status=%d", __func__,
            data_in_secondary, dev->interrupt_in_urb->status);

        if (data_in_secondary) {
            /* drain secondary buffer */
            int amount = bytes_to_read < data_in_secondary
                ? bytes_to_read : data_in_secondary;
            i = copy_to_user(buffer,
                             dev->read_buffer_secondary + dev->secondary_head,
                             amount);
            if (i) {
                retval = -EFAULT;
                goto exit;
            }
            /* copy_to_user() returned 0, so all 'amount' bytes were
             * copied; the old "(amount - i)" arithmetic and a second,
             * unreachable check of 'i' have been dropped */
            dev->secondary_head += amount;
            bytes_read += amount;
            bytes_to_read -= amount;
        } else {
            /* we check the primary buffer */
            spin_lock_irqsave(&dev->buflock, flags);
            if (dev->read_buffer_length) {
                /* we secure access to the primary */
                char *tmp;
                dbg(2, " %s : swap, read_buffer_length = %d", __func__,
                    dev->read_buffer_length);
                tmp = dev->read_buffer_secondary;
                dev->read_buffer_secondary = dev->read_buffer_primary;
                dev->read_buffer_primary = tmp;
                dev->secondary_head = 0;
                dev->secondary_tail = dev->read_buffer_length;
                dev->read_buffer_length = 0;
                spin_unlock_irqrestore(&dev->buflock, flags);
                /* we have a free buffer so use it */
                should_submit = 1;
            } else {
                /* even the primary was empty - we may need to do IO */
                if (!dev->read_urb_finished) {
                    /* somebody is doing IO */
                    spin_unlock_irqrestore(&dev->buflock, flags);
                    dbg(2, " %s : submitted already", __func__);
                } else {
                    /* we must initiate input */
                    dbg(2, " %s : initiate input", __func__);
                    dev->read_urb_finished = 0;
                    spin_unlock_irqrestore(&dev->buflock, flags);

                    usb_fill_int_urb(dev->interrupt_in_urb, dev->udev,
                        usb_rcvintpipe(dev->udev,
                            dev->interrupt_in_endpoint->bEndpointAddress),
                        dev->interrupt_in_buffer,
                        usb_endpoint_maxp(dev->interrupt_in_endpoint),
                        adu_interrupt_in_callback, dev,
                        dev->interrupt_in_endpoint->bInterval);
                    retval = usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL);
                    if (retval) {
                        dev->read_urb_finished = 1;
                        if (retval == -ENOMEM)
                            retval = bytes_read ? bytes_read : -ENOMEM;
                        dbg(2, " %s : submit failed", __func__);
                        goto exit;
                    }
                }

                /* we wait for I/O to complete */
                set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue(&dev->read_wait, &wait);
                spin_lock_irqsave(&dev->buflock, flags);
                if (!dev->read_urb_finished) {
                    spin_unlock_irqrestore(&dev->buflock, flags);
                    timeout = schedule_timeout(COMMAND_TIMEOUT);
                } else {
                    spin_unlock_irqrestore(&dev->buflock, flags);
                    set_current_state(TASK_RUNNING);
                }
                remove_wait_queue(&dev->read_wait, &wait);

                if (timeout <= 0) {
                    dbg(2, " %s : timeout", __func__);
                    retval = bytes_read ? bytes_read : -ETIMEDOUT;
                    goto exit;
                }

                if (signal_pending(current)) {
                    dbg(2, " %s : signal pending", __func__);
                    retval = bytes_read ? bytes_read : -EINTR;
                    goto exit;
                }
            }
        }
    }

    retval = bytes_read;
    /* if the primary buffer is empty then use it */
    spin_lock_irqsave(&dev->buflock, flags);
    if (should_submit && dev->read_urb_finished) {
        dev->read_urb_finished = 0;
        spin_unlock_irqrestore(&dev->buflock, flags);
        usb_fill_int_urb(dev->interrupt_in_urb, dev->udev,
            usb_rcvintpipe(dev->udev,
                dev->interrupt_in_endpoint->bEndpointAddress),
            dev->interrupt_in_buffer,
            usb_endpoint_maxp(dev->interrupt_in_endpoint),
            adu_interrupt_in_callback, dev,
            dev->interrupt_in_endpoint->bInterval);
        if (usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL) != 0)
            dev->read_urb_finished = 1;
        /* we ignore failure */
    } else {
        spin_unlock_irqrestore(&dev->buflock, flags);
    }

exit:
    /* unlock the device */
    mutex_unlock(&dev->mtx);

    dbg(2, " %s : leave, return value %d", __func__, retval);
    return retval;
}
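/*
 * The sleep in adu_read() is open-coded so that the "URB finished" test
 * can be made under dev->buflock right before blocking.  Where the
 * condition is safe to re-test on every wakeup, the whole sequence
 * collapses into wait_event_interruptible_timeout().  A minimal sketch
 * against the same device fields -- illustrative only, not the driver's
 * actual code:
 */
#include <linux/wait.h>

static int adu_wait_for_read_urb(struct adu_device *dev)
{
    long t;

    /* Sleeps until adu_interrupt_in_callback() sets read_urb_finished
     * and wakes dev->read_wait, a signal arrives, or COMMAND_TIMEOUT
     * jiffies elapse.  The condition is re-evaluated on every wakeup,
     * so no wakeup can be lost between the test and the sleep. */
    t = wait_event_interruptible_timeout(dev->read_wait,
                                         dev->read_urb_finished,
                                         COMMAND_TIMEOUT);
    if (t == 0)
        return -ETIMEDOUT;      /* timed out, condition still false */
    if (t < 0)
        return t;               /* -ERESTARTSYS on signal */
    return 0;                   /* URB completed */
}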
/**
 * n_hdlc_tty_read - Called to retrieve one frame of data (if available)
 * @tty - pointer to tty instance data
 * @file - pointer to open file object
 * @buf - pointer to returned data buffer
 * @nr - size of returned data buffer
 *
 * Returns the number of bytes returned or error code.
 */
static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
                               __u8 __user *buf, size_t nr)
{
    struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
    int ret = 0;
    struct n_hdlc_buf *rbuf;
    DECLARE_WAITQUEUE(wait, current);

    if (debuglevel >= DEBUG_LEVEL_INFO)
        printk("%s(%d)n_hdlc_tty_read() called\n", __FILE__, __LINE__);

    /* Validate the pointers */
    if (!n_hdlc)
        return -EIO;

    /* verify user access to buffer */
    if (!access_ok(buf, nr)) {
        printk(KERN_WARNING "%s(%d) n_hdlc_tty_read() can't verify user "
               "buffer\n", __FILE__, __LINE__);
        return -EFAULT;
    }

    add_wait_queue(&tty->read_wait, &wait);

    for (;;) {
        if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
            ret = -EIO;
            break;
        }
        if (tty_hung_up_p(file))
            break;

        set_current_state(TASK_INTERRUPTIBLE);

        rbuf = n_hdlc_buf_get(&n_hdlc->rx_buf_list);
        if (rbuf) {
            if (rbuf->count > nr) {
                /* too large for caller's buffer */
                ret = -EOVERFLOW;
            } else {
                __set_current_state(TASK_RUNNING);
                if (copy_to_user(buf, rbuf->buf, rbuf->count))
                    ret = -EFAULT;
                else
                    ret = rbuf->count;
            }

            if (n_hdlc->rx_free_buf_list.count > DEFAULT_RX_BUF_COUNT)
                kfree(rbuf);
            else
                n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, rbuf);
            break;
        }

        /* no data */
        if (tty_io_nonblock(tty, file)) {
            ret = -EAGAIN;
            break;
        }

        schedule();

        if (signal_pending(current)) {
            ret = -EINTR;
            break;
        }
    }

    remove_wait_queue(&tty->read_wait, &wait);
    __set_current_state(TASK_RUNNING);

    return ret;
} /* end of n_hdlc_tty_read() */
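/*
 * For context, this is how the frame-oriented read above is driven from
 * user space: attach the N_HDLC line discipline to a tty with TIOCSETD,
 * after which each read() returns at most one complete frame.  A minimal
 * sketch -- the device path is an arbitrary example, and error handling
 * is reduced to perror():
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/tty.h>          /* N_HDLC */

int main(void)
{
    unsigned char frame[1024];
    int ldisc = N_HDLC;
    ssize_t n;
    int fd;

    fd = open("/dev/ttyS0", O_RDWR);        /* example device */
    if (fd < 0) {
        perror("open");
        return 1;
    }
    if (ioctl(fd, TIOCSETD, &ldisc) < 0) {  /* switch line discipline */
        perror("TIOCSETD");
        return 1;
    }

    /* One read() == one frame.  An EOVERFLOW errno corresponds to the
     * -EOVERFLOW path above: the frame was larger than the buffer. */
    n = read(fd, frame, sizeof(frame));
    if (n < 0)
        perror("read");
    else
        printf("received %zd-byte frame\n", n);

    close(fd);
    return 0;
}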
static int cfe_commit(void)
{
    DECLARE_WAITQUEUE(wait, current);
    wait_queue_head_t wait_q;
    struct erase_info erase;
    int ret = 0;
    size_t erasesize, len = 0;
    u_int32_t offset;

    if (cfe_mtd == NULL || cfe_buf == NULL) {
        printk("cfe_commit: do nothing\n");
        return 0;
    }

    if (in_interrupt()) {
        printk("cfe_commit: not committing in interrupt\n");
        return -EINVAL;
    }

    down(&nvram_sem);

    /* Backup sector blocks to be erased */
    erasesize = ROUNDUP(CFE_NVRAM_SPACE, cfe_mtd->erasesize);

#ifndef CONFIG_MTD_NFLASH
    /* Erase sector blocks */
    init_waitqueue_head(&wait_q);
    for (offset = cfe_offset; offset < cfe_offset + erasesize;
         offset += cfe_mtd->erasesize) {
        printk("cfe_commit: ERASE sector block offset(%08x) "
               "cfe_mtd->erasesize(%08x)\n", offset, cfe_mtd->erasesize);
        erase.mtd = cfe_mtd;
        erase.addr = offset;
        erase.len = cfe_mtd->erasesize;
        erase.callback = erase_callback;
        erase.priv = (u_long)&wait_q;

        set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&wait_q, &wait);

        /* Unlock sector blocks */
        if (cfe_mtd->unlock)
            cfe_mtd->unlock(cfe_mtd, offset, cfe_mtd->erasesize);

        if ((ret = cfe_mtd->erase(cfe_mtd, &erase))) {
            set_current_state(TASK_RUNNING);
            remove_wait_queue(&wait_q, &wait);
            printk("\n\n!!!!! cfe_commit: erase error:%d\n\n\n", ret);
            ret = -EIO;
            goto done;
        }

        /* Wait for erase to finish */
        schedule();
        remove_wait_queue(&wait_q, &wait);
    }
#endif

    ret = cfe_mtd->write(cfe_mtd, cfe_offset, erasesize, &len, cfe_buf);
    if (ret || len != erasesize) {
        printk("cfe_commit: write error\n");
        ret = -EIO;
    }

done:
    up(&nvram_sem);
    if (cfe_mtd != NULL) {
        put_mtd_device(cfe_mtd);
        cfe_mtd = NULL;
    }
    if (cfe_buf != NULL) {
        kfree(cfe_buf);
        cfe_buf = NULL;
    }
    printk("cfe_commit: done %d\n", ret);
    return ret;
}
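/*
 * cfe_commit() parks itself on a private wait queue and relies on the MTD
 * erase callback for the wakeup.  Under the same callback-style erase API,
 * a struct completion expresses the handshake more directly and cannot
 * miss a wakeup that fires before schedule().  A sketch -- erase_done()
 * plays the role of erase_callback() above, and the function is
 * illustrative, not this driver's code:
 */
#include <linux/completion.h>
#include <linux/mtd/mtd.h>

static void erase_done(struct erase_info *erase)
{
    complete((struct completion *)erase->priv);
}

static int erase_one_block(struct mtd_info *mtd, uint64_t addr)
{
    DECLARE_COMPLETION_ONSTACK(done);
    struct erase_info erase = {
        .mtd      = mtd,
        .addr     = addr,
        .len      = mtd->erasesize,
        .callback = erase_done,     /* callback-style MTD erase API */
        .priv     = (u_long)&done,
    };
    int ret = mtd->erase(mtd, &erase);

    if (ret)
        return ret;
    wait_for_completion(&done);     /* no lost-wakeup window */
    return erase.state == MTD_ERASE_DONE ? 0 : -EIO;
}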
static long mwave_ioctl(struct file *file, unsigned int iocmd,
                        unsigned long ioarg)
{
    unsigned int retval = 0;
    pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;
    void __user *arg = (void __user *)ioarg;

    PRINTK_4(TRACE_MWAVE,
        "mwavedd::mwave_ioctl, entry file %p cmd %x arg %x\n",
        file, iocmd, (int) ioarg);

    switch (iocmd) {

    case IOCTL_MW_RESET:
        PRINTK_1(TRACE_MWAVE,
            "mwavedd::mwave_ioctl, IOCTL_MW_RESET"
            " calling tp3780I_ResetDSP\n");
        mutex_lock(&mwave_mutex);
        retval = tp3780I_ResetDSP(&pDrvData->rBDData);
        mutex_unlock(&mwave_mutex);
        PRINTK_2(TRACE_MWAVE,
            "mwavedd::mwave_ioctl, IOCTL_MW_RESET"
            " retval %x from tp3780I_ResetDSP\n", retval);
        break;

    case IOCTL_MW_RUN:
        PRINTK_1(TRACE_MWAVE,
            "mwavedd::mwave_ioctl, IOCTL_MW_RUN"
            " calling tp3780I_StartDSP\n");
        mutex_lock(&mwave_mutex);
        retval = tp3780I_StartDSP(&pDrvData->rBDData);
        mutex_unlock(&mwave_mutex);
        PRINTK_2(TRACE_MWAVE,
            "mwavedd::mwave_ioctl, IOCTL_MW_RUN"
            " retval %x from tp3780I_StartDSP\n", retval);
        break;

    case IOCTL_MW_DSP_ABILITIES: {
        MW_ABILITIES rAbilities;

        PRINTK_1(TRACE_MWAVE,
            "mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES calling"
            " tp3780I_QueryAbilities\n");
        mutex_lock(&mwave_mutex);
        retval = tp3780I_QueryAbilities(&pDrvData->rBDData, &rAbilities);
        mutex_unlock(&mwave_mutex);
        PRINTK_2(TRACE_MWAVE,
            "mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES"
            " retval %x from tp3780I_QueryAbilities\n", retval);
        if (retval == 0) {
            if (copy_to_user(arg, &rAbilities, sizeof(MW_ABILITIES)))
                return -EFAULT;
        }
        PRINTK_2(TRACE_MWAVE,
            "mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES"
            " exit retval %x\n", retval);
    }
        break;

    case IOCTL_MW_READ_DATA:
    case IOCTL_MW_READCLEAR_DATA: {
        MW_READWRITE rReadData;
        unsigned short __user *pusBuffer = NULL;

        if (copy_from_user(&rReadData, arg, sizeof(MW_READWRITE)))
            return -EFAULT;
        pusBuffer = (unsigned short __user *)(rReadData.pBuf);

        PRINTK_4(TRACE_MWAVE,
            "mwavedd::mwave_ioctl IOCTL_MW_READ_DATA,"
            " size %lx, ioarg %lx pusBuffer %p\n",
            rReadData.ulDataLength, ioarg, pusBuffer);
        mutex_lock(&mwave_mutex);
        retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
                iocmd, pusBuffer, rReadData.ulDataLength,
                rReadData.usDspAddress);
        mutex_unlock(&mwave_mutex);
    }
        break;

    case IOCTL_MW_READ_INST: {
        MW_READWRITE rReadData;
        unsigned short __user *pusBuffer = NULL;

        if (copy_from_user(&rReadData, arg, sizeof(MW_READWRITE)))
            return -EFAULT;
        pusBuffer = (unsigned short __user *)(rReadData.pBuf);

        PRINTK_4(TRACE_MWAVE,
            "mwavedd::mwave_ioctl IOCTL_MW_READ_INST,"
            " size %lx, ioarg %lx pusBuffer %p\n",
            rReadData.ulDataLength / 2, ioarg, pusBuffer);
        mutex_lock(&mwave_mutex);
        /* instruction-store reads must go through the I-store helper,
         * not the D-store one the original text called here */
        retval = tp3780I_ReadWriteDspIStore(&pDrvData->rBDData,
                iocmd, pusBuffer, rReadData.ulDataLength / 2,
                rReadData.usDspAddress);
        mutex_unlock(&mwave_mutex);
    }
        break;

    case IOCTL_MW_WRITE_DATA: {
        MW_READWRITE rWriteData;
        unsigned short __user *pusBuffer = NULL;

        if (copy_from_user(&rWriteData, arg, sizeof(MW_READWRITE)))
            return -EFAULT;
        pusBuffer = (unsigned short __user *)(rWriteData.pBuf);

        PRINTK_4(TRACE_MWAVE,
            "mwavedd::mwave_ioctl IOCTL_MW_WRITE_DATA,"
            " size %lx, ioarg %lx pusBuffer %p\n",
            rWriteData.ulDataLength, ioarg, pusBuffer);
        mutex_lock(&mwave_mutex);
        retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
                iocmd, pusBuffer, rWriteData.ulDataLength,
                rWriteData.usDspAddress);
        mutex_unlock(&mwave_mutex);
    }
        break;

    case IOCTL_MW_WRITE_INST: {
        MW_READWRITE rWriteData;
        unsigned short __user *pusBuffer = NULL;

        if (copy_from_user(&rWriteData, arg, sizeof(MW_READWRITE)))
            return -EFAULT;
        pusBuffer = (unsigned short __user *)(rWriteData.pBuf);

        PRINTK_4(TRACE_MWAVE,
            "mwavedd::mwave_ioctl IOCTL_MW_WRITE_INST,"
            " size %lx, ioarg %lx pusBuffer %p\n",
            rWriteData.ulDataLength, ioarg, pusBuffer);
        mutex_lock(&mwave_mutex);
        retval = tp3780I_ReadWriteDspIStore(&pDrvData->rBDData,
                iocmd, pusBuffer, rWriteData.ulDataLength,
                rWriteData.usDspAddress);
        mutex_unlock(&mwave_mutex);
    }
        break;

    case IOCTL_MW_REGISTER_IPC: {
        unsigned int ipcnum = (unsigned int)ioarg;

        if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
            PRINTK_ERROR(KERN_ERR_MWAVE
                "mwavedd::mwave_ioctl: IOCTL_MW_REGISTER_IPC:"
                " Error: Invalid ipcnum %x\n", ipcnum);
            return -EINVAL;
        }
        PRINTK_3(TRACE_MWAVE,
            "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
            " ipcnum %x entry usIntCount %x\n",
            ipcnum, pDrvData->IPCs[ipcnum].usIntCount);

        mutex_lock(&mwave_mutex);
        pDrvData->IPCs[ipcnum].bIsHere = FALSE;
        pDrvData->IPCs[ipcnum].bIsEnabled = TRUE;
        mutex_unlock(&mwave_mutex);

        PRINTK_2(TRACE_MWAVE,
            "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
            " ipcnum %x exit\n", ipcnum);
    }
        break;

    case IOCTL_MW_GET_IPC: {
        unsigned int ipcnum = (unsigned int)ioarg;

        if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
            PRINTK_ERROR(KERN_ERR_MWAVE
                "mwavedd::mwave_ioctl: IOCTL_MW_GET_IPC:"
                " Error: Invalid ipcnum %x\n", ipcnum);
            return -EINVAL;
        }
        PRINTK_3(TRACE_MWAVE,
            "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
            " ipcnum %x, usIntCount %x\n",
            ipcnum, pDrvData->IPCs[ipcnum].usIntCount);

        mutex_lock(&mwave_mutex);
        if (pDrvData->IPCs[ipcnum].bIsEnabled == TRUE) {
            DECLARE_WAITQUEUE(wait, current);

            PRINTK_2(TRACE_MWAVE,
                "mwavedd::mwave_ioctl, thread for"
                " ipc %x going to sleep\n", ipcnum);
            add_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait);
            pDrvData->IPCs[ipcnum].bIsHere = TRUE;
            set_current_state(TASK_INTERRUPTIBLE);
            /* check whether the event already fired before sleeping */
            if (pDrvData->IPCs[ipcnum].usIntCount == 1) {
                pDrvData->IPCs[ipcnum].usIntCount = 2;
                PRINTK_2(TRACE_MWAVE,
                    "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC ipcnum %x"
                    " handling first int\n", ipcnum);
            } else {
                schedule();
                if (pDrvData->IPCs[ipcnum].usIntCount == 1)
                    pDrvData->IPCs[ipcnum].usIntCount = 2;
                PRINTK_2(TRACE_MWAVE,
                    "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC ipcnum %x"
                    " woke up and returning to application\n", ipcnum);
            }
            pDrvData->IPCs[ipcnum].bIsHere = FALSE;
            remove_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait);
            set_current_state(TASK_RUNNING);
            PRINTK_2(TRACE_MWAVE,
                "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC,"
                " returning thread for ipc %x processing\n", ipcnum);
        }
        mutex_unlock(&mwave_mutex);
    }
        break;

    case IOCTL_MW_UNREGISTER_IPC: {
        unsigned int ipcnum = (unsigned int)ioarg;

        PRINTK_2(TRACE_MWAVE,
            "mwavedd::mwave_ioctl IOCTL_MW_UNREGISTER_IPC"
            " ipcnum %x\n", ipcnum);
        if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
            PRINTK_ERROR(KERN_ERR_MWAVE
                "mwavedd::mwave_ioctl: IOCTL_MW_UNREGISTER_IPC:"
                " Error: Invalid ipcnum %x\n", ipcnum);
            return -EINVAL;
        }
        mutex_lock(&mwave_mutex);
        if (pDrvData->IPCs[ipcnum].bIsEnabled == TRUE) {
            pDrvData->IPCs[ipcnum].bIsEnabled = FALSE;
            if (pDrvData->IPCs[ipcnum].bIsHere == TRUE)
                wake_up_interruptible(&pDrvData->IPCs[ipcnum].ipc_wait_queue);
        }
        mutex_unlock(&mwave_mutex);
    }
        break;

    default:
        return -ENOTTY;
    }

    PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, exit retval %x\n", retval);

    return retval;
}
static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
                           const unsigned char *buf, size_t nr)
{
    const unsigned char *b = buf;
    DECLARE_WAITQUEUE(wait, current);
    int c;
    ssize_t retval = 0;

    if (L_TOSTOP(tty) && file->f_op->write != redirected_tty_write) {
        retval = tty_check_change(tty);
        if (retval)
            return retval;
    }

    process_echoes(tty);

    add_wait_queue(&tty->write_wait, &wait);
    while (1) {
        set_current_state(TASK_INTERRUPTIBLE);
        if (signal_pending(current)) {
            retval = -ERESTARTSYS;
            break;
        }
        if (tty_hung_up_p(file) || (tty->link && !tty->link->count)) {
            retval = -EIO;
            break;
        }
        if (O_OPOST(tty) && !(test_bit(TTY_HW_COOK_OUT, &tty->flags))) {
            while (nr > 0) {
                ssize_t num = process_output_block(tty, b, nr);
                if (num < 0) {
                    if (num == -EAGAIN)
                        break;
                    retval = num;
                    goto break_out;
                }
                b += num;
                nr -= num;
                if (nr == 0)
                    break;
                c = *b;
                if (process_output(c, tty) < 0)
                    break;
                b++;
                nr--;
            }
            if (tty->ops->flush_chars)
                tty->ops->flush_chars(tty);
        } else {
            while (nr > 0) {
                mutex_lock(&tty->output_lock);
                c = tty->ops->write(tty, b, nr);
                mutex_unlock(&tty->output_lock);
                if (c < 0) {
                    retval = c;
                    goto break_out;
                }
                if (!c)
                    break;
                b += c;
                nr -= c;
            }
        }
        if (!nr)
            break;
        if (file->f_flags & O_NONBLOCK) {
            retval = -EAGAIN;
            break;
        }
        schedule();
    }
break_out:
    __set_current_state(TASK_RUNNING);
    remove_wait_queue(&tty->write_wait, &wait);
    if (b - buf != nr && tty->fasync)
        set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
    return (b - buf) ? b - buf : retval;
}
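/*
 * n_tty_write() uses the classic DECLARE_WAITQUEUE() / set_current_state()
 * / schedule() sequence.  The prepare_to_wait()/finish_wait() helpers
 * bundle the same steps with less room for ordering mistakes.  A sketch of
 * the write loop's skeleton under that API -- tty_write_room() stands in
 * here for the loop's real "made progress" test:
 */
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/wait.h>

static ssize_t wait_for_write_room(struct tty_struct *tty)
{
    DEFINE_WAIT(wait);
    ssize_t retval = 0;

    for (;;) {
        /* Queues the task and marks it TASK_INTERRUPTIBLE before the
         * condition is tested, so a wake_up() arriving between the test
         * and schedule() simply makes schedule() return immediately. */
        prepare_to_wait(&tty->write_wait, &wait, TASK_INTERRUPTIBLE);
        if (signal_pending(current)) {
            retval = -ERESTARTSYS;
            break;
        }
        if (tty_write_room(tty) > 0)    /* progress is possible again */
            break;
        schedule();
    }
    finish_wait(&tty->write_wait, &wait);
    return retval;
}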
/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, __u32 minsize,
                                  __u32 *ofs, __u32 *len)
{
    struct jffs2_eraseblock *jeb = c->nextblock;

restart:
    if (jeb && minsize > jeb->free_size) {
        /* Skip the end of this block and file it as having some dirty space */
        c->dirty_size += jeb->free_size;
        c->free_size -= jeb->free_size;
        jeb->dirty_size += jeb->free_size;
        jeb->free_size = 0;
        D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list "
                  "(free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
                  jeb->offset, jeb->free_size, jeb->dirty_size,
                  jeb->used_size));
        list_add_tail(&jeb->list, &c->dirty_list);
        c->nextblock = jeb = NULL;
    }

    if (!jeb) {
        struct list_head *next;
        /* Take the next block off the 'free' list */

        if (list_empty(&c->free_list)) {
            DECLARE_WAITQUEUE(wait, current);

            if (!c->nr_erasing_blocks) {
                // if (list_empty(&c->erasing_list) && list_empty(&c->erase_pending_list) && list_empty(c->erase_complete_list)) {
                /* Ouch. We're in GC, or we wouldn't have got here.
                   And there's no space left. At all. */
                printk(KERN_CRIT "Argh. No free space left for GC. "
                       "nr_erasing_blocks is %d. nr_free_blocks is %d. "
                       "(erasingempty: %s, erasependingempty: %s)\n",
                       c->nr_erasing_blocks, c->nr_free_blocks,
                       list_empty(&c->erasing_list) ? "yes" : "no",
                       list_empty(&c->erase_pending_list) ? "yes" : "no");
                return -ENOSPC;
            }
            /* Make sure this can't deadlock. Someone has to start the
               erases of erase_pending blocks */
            set_current_state(TASK_INTERRUPTIBLE);
            add_wait_queue(&c->erase_wait, &wait);
            D1(printk(KERN_DEBUG "Waiting for erases to complete. "
                      "erasing_blocks is %d. (erasingempty: %s, "
                      "erasependingempty: %s)\n",
                      c->nr_erasing_blocks,
                      list_empty(&c->erasing_list) ? "yes" : "no",
                      list_empty(&c->erase_pending_list) ? "yes" : "no"));
            if (!list_empty(&c->erase_pending_list)) {
                D1(printk(KERN_DEBUG "Triggering pending erases\n"));
                jffs2_erase_pending_trigger(c);
            }
            spin_unlock_bh(&c->erase_completion_lock);
            schedule();
            remove_wait_queue(&c->erase_wait, &wait);
            spin_lock_bh(&c->erase_completion_lock);
            if (signal_pending(current))
                return -EINTR;
            /* An erase may have failed, decreasing the amount of free
               space available. So we must restart from the beginning */
            return -EAGAIN;
        }

        next = c->free_list.next;
        list_del(next);
        c->nextblock = jeb = list_entry(next, struct jffs2_eraseblock, list);
        c->nr_free_blocks--;
        if (jeb->free_size != c->sector_size - sizeof(struct jffs2_unknown_node)) {
            printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had "
                   "free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
            goto restart;
        }
    }

    /* OK, jeb (==c->nextblock) is now pointing at a block which definitely
       has enough space */
    *ofs = jeb->offset + (c->sector_size - jeb->free_size);
    *len = jeb->free_size;
    D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n",
              *len, *ofs));
    return 0;
}
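/*
 * jffs2_do_reserve_space() returns -EAGAIN deliberately after sleeping on
 * erase_wait: a failed erase may have shrunk the free space, so the size
 * checks must be redone from the top.  Callers are therefore expected to
 * loop.  A sketch of such a wrapper, assuming (per the comment above) the
 * caller already holds the alloc sem and that erase_completion_lock is the
 * spin_lock_bh() lock seen above -- illustrative, not jffs2's actual
 * jffs2_reserve_space():
 */
static int jffs2_reserve_space_retry(struct jffs2_sb_info *c, __u32 minsize,
                                     __u32 *ofs, __u32 *len)
{
    int ret;

    do {
        spin_lock_bh(&c->erase_completion_lock);
        /* may drop and retake erase_completion_lock while waiting */
        ret = jffs2_do_reserve_space(c, minsize, ofs, len);
        spin_unlock_bh(&c->erase_completion_lock);
    } while (ret == -EAGAIN);       /* -ENOSPC and -EINTR are final */

    return ret;
}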