/* Called under the mutex to grab exclusive access to a drive */ static int grab_drive(struct floppy_state *fs, enum swim_state state, int interruptible) { unsigned long flags; swim3_dbg("%s", "-> grab drive\n"); spin_lock_irqsave(&swim3_lock, flags); if (fs->state != idle && fs->state != available) { ++fs->wanted; /* this will enable irqs in order to sleep */ if (!interruptible) wait_event_lock_irq(fs->wait, fs->state == available, swim3_lock); else if (wait_event_interruptible_lock_irq(fs->wait, fs->state == available, swim3_lock)) { --fs->wanted; spin_unlock_irqrestore(&swim3_lock, flags); return -EINTR; } --fs->wanted; } fs->state = state; spin_unlock_irqrestore(&swim3_lock, flags); return 0; }
/* this gets called when the md device is ready to unplug its underlying * (slave) device queues -- before we let any writes go down, we need to * sync the dirty pages of the bitmap file to disk */ int bitmap_unplug(struct bitmap *bitmap) { unsigned long i, attr, flags; struct page *page; int wait = 0; int err; if (!bitmap) return 0; /* look at each page to see if there are any set bits that need to be * flushed out to disk */ for (i = 0; i < bitmap->file_pages; i++) { spin_lock_irqsave(&bitmap->lock, flags); if (!bitmap->filemap) { spin_unlock_irqrestore(&bitmap->lock, flags); return 0; } page = bitmap->filemap[i]; attr = get_page_attr(bitmap, page); clear_page_attr(bitmap, page, BITMAP_PAGE_DIRTY); clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE); if ((attr & BITMAP_PAGE_DIRTY)) wait = 1; spin_unlock_irqrestore(&bitmap->lock, flags); if (attr & (BITMAP_PAGE_DIRTY | BITMAP_PAGE_NEEDWRITE)) { err = write_page(bitmap, page, 0); if (err == -EAGAIN) { if (attr & BITMAP_PAGE_DIRTY) err = write_page(bitmap, page, 1); else err = 0; } if (err) return 1; } } if (wait) { /* if any writes were performed, we need to wait on them */ if (bitmap->file) { spin_lock_irq(&bitmap->write_lock); wait_event_lock_irq(bitmap->write_wait, list_empty(&bitmap->complete_pages), bitmap->write_lock, wake_up_process(bitmap->writeback_daemon->tsk)); spin_unlock_irq(&bitmap->write_lock); } else md_super_wait(bitmap->mddev); } return 0; }
/*
 * Stop streaming on the histogram video queue: hand every queued buffer
 * back to vb2 in the error state, reset the IRQ queue, and wait for any
 * buffer currently being read out to complete before returning.
 */
static void histo_stop_streaming(struct vb2_queue *vq)
{
	struct vsp1_histogram *histo = vb2_get_drv_priv(vq);
	struct vsp1_histogram_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&histo->irqlock, flags);

	/*
	 * Return all pending buffers to vb2 with an error status.
	 * vb2_buffer_done() does not unlink the entries, so a plain
	 * iteration followed by re-initializing the list head suffices.
	 */
	list_for_each_entry(buf, &histo->irqqueue, queue)
		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
	INIT_LIST_HEAD(&histo->irqqueue);

	/* Sleep (releasing irqlock) until any in-flight readout finishes. */
	wait_event_lock_irq(histo->wait_queue, !histo->readout,
			    histo->irqlock);

	spin_unlock_irqrestore(&histo->irqlock, flags);
}