Example #1
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];

	if (run->mmio.len > sizeof(*gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 4: *gpr = *(u32 *)run->mmio.data; break;
		case 2: *gpr = *(u16 *)run->mmio.data; break;
		case 1: *gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: *gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: *gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: *gpr = *(u8 *)run->mmio.data; break;
		}
	}
}
Example #2
/*
 * update playback/capture pointer from interrupts
 */
static void snd_pmac_pcm_update(struct snd_pmac *chip, struct pmac_stream *rec)
{
	volatile struct dbdma_cmd __iomem *cp;
	int c;
	int stat;

	spin_lock(&chip->reg_lock);
	if (rec->running) {
		cp = &rec->cmd.cmds[rec->cur_period];
		for (c = 0; c < rec->nperiods; c++) { /* at most all fragments */
			stat = ld_le16(&cp->xfer_status);
			if (! (stat & ACTIVE))
				break;
			/*printk("update frag %d\n", rec->cur_period);*/
			st_le16(&cp->xfer_status, 0);
			st_le16(&cp->req_count, rec->period_size);
			/*st_le16(&cp->res_count, 0);*/
			rec->cur_period++;
			if (rec->cur_period >= rec->nperiods) {
				rec->cur_period = 0;
				cp = rec->cmd.cmds;
			} else
				cp++;
			spin_unlock(&chip->reg_lock);
			snd_pcm_period_elapsed(rec->substream);
			spin_lock(&chip->reg_lock);
		}
	}
	spin_unlock(&chip->reg_lock);
}
Example #3
/*
 * Handle DEAD DMA transfers:
 * if the TX status comes up "DEAD" - reported on some Power Computing machines
 * we need to re-start the dbdma - but from a different physical start address
 * and with a different transfer length.  It would get very messy to do this
 * with the normal dbdma_cmd blocks - we would have to re-write the buffer start
 * addresses each time.  So, we will keep a single dbdma_cmd block which can be
 * fiddled with.
 * When DEAD status is first reported the content of the faulted dbdma block is
 * copied into the emergency buffer and we note that the buffer is in use.
 * We then bump the start physical address by the amount that was successfully
 * output before it died.
 * On any subsequent DEAD result we just do the bump-ups (we know that we are
 * already using the emergency dbdma_cmd).
 * CHECK: this just tries to "do it".  It is possible that we should abandon
 * xfers when the number of residual bytes gets below a certain value - I can
 * see that this might cause a loop-forever if a too small transfer causes
 * DEAD status.  However this is a TODO for now - we'll see what gets reported.
 * When we get a successful transfer result with the emergency buffer we just
 * pretend that it completed using the original dbdma_cmd and carry on.  The
 * 'next_cmd' field will already point back to the original loop of blocks.
 */
static inline void snd_pmac_pcm_dead_xfer(struct pmac_stream *rec,
					  volatile struct dbdma_cmd __iomem *cp)
{
	unsigned short req, res;
	unsigned int phy;

	/* printk(KERN_WARNING "snd-powermac: DMA died - patching it up!\n"); */

	/* to clear DEAD status we must first clear RUN
	   set it to quiescent to be on the safe side */
	(void)in_le32(&rec->dma->status);
	out_le32(&rec->dma->control, (RUN|PAUSE|FLUSH|WAKE) << 16);

	if (!emergency_in_use) { /* new problem */
		memcpy((void *)emergency_dbdma.cmds, (void *)cp,
		       sizeof(struct dbdma_cmd));
		emergency_in_use = 1;
		st_le16(&cp->xfer_status, 0);
		st_le16(&cp->req_count, rec->period_size);
		cp = emergency_dbdma.cmds;
	}

	/* now bump the values to reflect the amount
	   we haven't yet shifted */
	req = ld_le16(&cp->req_count);
	res = ld_le16(&cp->res_count);
	phy = ld_le32(&cp->phy_addr);
	phy += (req - res);
	st_le16(&cp->req_count, res);
	st_le16(&cp->res_count, 0);
	st_le16(&cp->xfer_status, 0);
	st_le32(&cp->phy_addr, phy);

	st_le32(&cp->cmd_dep, rec->cmd.addr
		+ sizeof(struct dbdma_cmd)*((rec->cur_period+1)%rec->nperiods));

	st_le16(&cp->command, OUTPUT_MORE | BR_ALWAYS | INTR_ALWAYS);

	/* point at our patched up command block */
	out_le32(&rec->dma->cmdptr, emergency_dbdma.addr);

	/* we must re-start the controller */
	(void)in_le32(&rec->dma->status);
	/* should complete clearing the DEAD status */
	out_le32(&rec->dma->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
}
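
To make the bump-up arithmetic above concrete, here is a minimal standalone sketch with invented values (the period size, residual count, and physical address are ours, not from a real transfer): the bytes already output are req - res, so the restart address advances by that amount and the retry length shrinks to the residual.

#include <stdint.h>
#include <stdio.h>

/* Hedged sketch of the bump-up arithmetic; all values are invented. */
int main(void)
{
	uint16_t req = 4096;	/* original req_count: one full period */
	uint16_t res = 1024;	/* res_count: bytes left when DMA died */
	uint32_t phy = 0x10000;	/* original phy_addr */

	phy += req - res;	/* skip the 3072 bytes already output */
	req = res;		/* retry only the remaining 1024 bytes */

	printf("restart at 0x%x for %u bytes\n", phy, req);	/* 0x10c00, 1024 */
	return 0;
}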
Example #4
static void xfer_timeout(unsigned long data)
{
	struct floppy_state *fs = (struct floppy_state *) data;
	volatile struct swim3 *sw = fs->swim3;
	struct dbdma_regs *dr = fs->dma;
	struct dbdma_cmd *cp = fs->dma_cmd;
	unsigned long s;

	fs->timeout_pending = 0;
	st_le32(&dr->control, RUN << 16);
	out_8(&sw->intr_enable, 0);
	out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
	out_8(&sw->select, RELAX);
	if (rq_data_dir(fd_req) == WRITE)
		++cp;
	if (ld_le16(&cp->xfer_status) != 0)
		s = fs->scount - ((ld_le16(&cp->res_count) + 511) >> 9);
	else
		s = 0;
	/* ... */
}
Example #5
static void xfer_timeout(unsigned long data)
{
	struct floppy_state *fs = (struct floppy_state *) data;
	struct swim3 __iomem *sw = fs->swim3;
	struct dbdma_regs __iomem *dr = fs->dma;
	struct dbdma_cmd *cp = fs->dma_cmd;
	unsigned long s;
	int n;

	fs->timeout_pending = 0;
	out_le32(&dr->control, RUN << 16);
	/* We must wait a bit for dbdma to stop */
	for (n = 0; (in_le32(&dr->status) & ACTIVE) && n < 1000; n++)
		udelay(1);
	out_8(&sw->intr_enable, 0);
	out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
	out_8(&sw->select, RELAX);
	if (rq_data_dir(fd_req) == WRITE)
		++cp;
	if (ld_le16(&cp->xfer_status) != 0)
		s = fs->scount - ((ld_le16(&cp->res_count) + 511) >> 9);
	else
		s = 0;
	/* ... */
}
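
Both timeout handlers convert the dbdma residual byte count into completed sectors by rounding the residual up to whole 512-byte sectors and subtracting from the request size. A minimal sketch with invented numbers (scount and res are ours):

#include <stdio.h>

int main(void)
{
	int scount = 8;		/* sectors in the request (invented) */
	int res = 1536;		/* residual bytes reported by dbdma (invented) */

	/* (res + 511) >> 9 rounds up to untransferred sectors: here 3 */
	int s = scount - ((res + 511) >> 9);

	printf("sectors completed: %d\n", s);	/* prints 5 */
	return 0;
}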
Example #6
/*
 * update playback/capture pointer from interrupts
 */
static void snd_pmac_pcm_update(struct snd_pmac *chip, struct pmac_stream *rec)
{
	volatile struct dbdma_cmd __iomem *cp;
	int c;
	int stat;

	spin_lock(&chip->reg_lock);
	if (rec->running) {
		for (c = 0; c < rec->nperiods; c++) { /* at most all fragments */

			if (emergency_in_use)   /* already using DEAD xfer? */
				cp = emergency_dbdma.cmds;
			else
				cp = &rec->cmd.cmds[rec->cur_period];

			stat = ld_le16(&cp->xfer_status);

			if (stat & DEAD) {
				snd_pmac_pcm_dead_xfer(rec, cp);
				break; /* this block is still going */
			}

			if (emergency_in_use)
				emergency_in_use = 0; /* done that */

			if (! (stat & ACTIVE))
				break;

			/*printk("update frag %d\n", rec->cur_period);*/
			st_le16(&cp->xfer_status, 0);
			st_le16(&cp->req_count, rec->period_size);
			/*st_le16(&cp->res_count, 0);*/
			rec->cur_period++;
			if (rec->cur_period >= rec->nperiods) {
				rec->cur_period = 0;
			}

			spin_unlock(&chip->reg_lock);
			snd_pcm_period_elapsed(rec->substream);
			spin_lock(&chip->reg_lock);
		}
	}
	spin_unlock(&chip->reg_lock);
}
Example #7
/*
 * return the current pointer
 */
static inline snd_pcm_uframes_t snd_pmac_pcm_pointer(struct snd_pmac *chip,
						      struct pmac_stream *rec,
						      struct snd_pcm_substream *subs)
{
	int count = 0;

#if 1 /* hmm.. how can we get the current dma pointer?? */
	int stat;
	volatile struct dbdma_cmd __iomem *cp = &rec->cmd.cmds[rec->cur_period];
	stat = ld_le16(&cp->xfer_status);
	if (stat & (ACTIVE|DEAD)) {
		count = in_le16(&cp->res_count);
		if (count)
			count = rec->period_size - count;
	}
#endif
	count += rec->cur_period * rec->period_size;
	/*printk("pointer=%d\n", count);*/
	return bytes_to_frames(subs->runtime, count);
}
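
A worked example of the pointer calculation, with invented geometry: if each period is 4096 bytes, the engine is on period 2, and res_count reports 1024 bytes still pending, the position is (4096 - 1024) + 2 * 4096 = 11264 bytes.

#include <stdio.h>

/* Hedged sketch of the position calculation; the values are invented. */
int main(void)
{
	int period_size = 4096;
	int cur_period = 2;
	int res_count = 1024;	/* bytes the engine has not yet moved */

	int count = res_count ? period_size - res_count : 0;
	count += cur_period * period_size;

	printf("byte position = %d\n", count);	/* prints 11264 */
	return 0;
}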
Example #8
static irqreturn_t swim3_interrupt(int irq, void *dev_id)
{
	struct floppy_state *fs = (struct floppy_state *) dev_id;
	struct swim3 __iomem *sw = fs->swim3;
	int intr, err, n;
	int stat, resid;
	struct dbdma_regs __iomem *dr;
	struct dbdma_cmd *cp;

	intr = in_8(&sw->intr);
	err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
	if ((intr & ERROR_INTR) && fs->state != do_transfer)
		printk(KERN_ERR "swim3_interrupt, state=%d, dir=%x, intr=%x, err=%x\n",
		       fs->state, rq_data_dir(fd_req), intr, err);
	switch (fs->state) {
	case locating:
		if (intr & SEEN_SECTOR) {
			out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
			out_8(&sw->select, RELAX);
			out_8(&sw->intr_enable, 0);
			del_timer(&fs->timeout);
			fs->timeout_pending = 0;
			if (sw->ctrack == 0xff) {
				printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
				fs->cur_cyl = -1;
				if (fs->retries > 5) {
					swim3_end_request_cur(-EIO);
					fs->state = idle;
					start_request(fs);
				} else {
					fs->state = jogging;
					act(fs);
				}
				break;
			}
			fs->cur_cyl = sw->ctrack;
			fs->cur_sector = sw->csect;
			if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
				printk(KERN_ERR "swim3: expected cyl %d, got %d\n",
				       fs->expect_cyl, fs->cur_cyl);
			fs->state = do_transfer;
			act(fs);
		}
		break;
	case seeking:
	case jogging:
		if (sw->nseek == 0) {
			out_8(&sw->control_bic, DO_SEEK);
			out_8(&sw->select, RELAX);
			out_8(&sw->intr_enable, 0);
			del_timer(&fs->timeout);
			fs->timeout_pending = 0;
			if (fs->state == seeking)
				++fs->retries;
			fs->state = settling;
			act(fs);
		}
		break;
	case settling:
		out_8(&sw->intr_enable, 0);
		del_timer(&fs->timeout);
		fs->timeout_pending = 0;
		act(fs);
		break;
	case do_transfer:
		if ((intr & (ERROR_INTR | TRANSFER_DONE)) == 0)
			break;
		out_8(&sw->intr_enable, 0);
		out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
		out_8(&sw->select, RELAX);
		del_timer(&fs->timeout);
		fs->timeout_pending = 0;
		dr = fs->dma;
		cp = fs->dma_cmd;
		if (rq_data_dir(fd_req) == WRITE)
			++cp;
		/*
		 * Check that the main data transfer has finished.
		 * On writing, the swim3 sometimes doesn't use
		 * up all the bytes of the postamble, so we can still
		 * see DMA active here.  That doesn't matter as long
		 * as all the sector data has been transferred.
		 */
		if ((intr & ERROR_INTR) == 0 && cp->xfer_status == 0) {
			/* wait a little while for DMA to complete */
			for (n = 0; n < 100; ++n) {
				if (cp->xfer_status != 0)
					break;
				udelay(1);
				barrier();
			}
		}
		/* turn off DMA */
		out_le32(&dr->control, (RUN | PAUSE) << 16);
		stat = ld_le16(&cp->xfer_status);
		resid = ld_le16(&cp->res_count);
		if (intr & ERROR_INTR) {
			n = fs->scount - 1 - resid / 512;
			if (n > 0) {
				blk_update_request(fd_req, 0, n << 9);
				fs->req_sector += n;
			}
			if (fs->retries < 5) {
				++fs->retries;
				act(fs);
			} else {
				printk("swim3: error %sing block %ld (err=%x)\n",
				       rq_data_dir(fd_req) == WRITE? "writ": "read",
				       (long)blk_rq_pos(fd_req), err);
				swim3_end_request_cur(-EIO);
				fs->state = idle;
			}
		} else {
			if ((stat & ACTIVE) == 0 || resid != 0) {
				/* musta been an error */
				printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
				printk(KERN_ERR "  state=%d, dir=%x, intr=%x, err=%x\n",
				       fs->state, rq_data_dir(fd_req), intr, err);
				swim3_end_request_cur(-EIO);
				fs->state = idle;
				start_request(fs);
				break;
			}
			if (swim3_end_request(0, fs->scount << 9)) {
				fs->req_sector += fs->scount;
				if (fs->req_sector > fs->secpertrack) {
					fs->req_sector -= fs->secpertrack;
					if (++fs->head > 1) {
						fs->head = 0;
						++fs->req_cyl;
					}
				}
				act(fs);
			} else
				fs->state = idle;
		}
		if (fs->state == idle)
			start_request(fs);
		break;
	default:
		printk(KERN_ERR "swim3: don't know what to do in state %d\n", fs->state);
	}
	return IRQ_HANDLED;
}
Example #9
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}
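
The sign-extension step widens a narrow MMIO value by casting through the matching signed type. A standalone sketch of the 16-bit case (the value is invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t gpr = 0x8001;		/* 16-bit load with its sign bit set */
	gpr = (int64_t)(int16_t)gpr;	/* same cast chain as (s64)(s16)gpr */
	printf("0x%016llx\n", (unsigned long long)gpr);	/* 0xffffffffffff8001 */
	return 0;
}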
Example #10
uint16_t _ld_le16(uint16_t *addr)
{
	return ld_le16(addr);
}
uint16_t pci_mem_le_ld_le16(uint16_t *adr)
{
	return ld_le16(adr);
}
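
For reference, ld_le16() loads a 16-bit little-endian value and returns it in host byte order (on big-endian PowerPC it compiles to a byte-reversed load). A portable sketch of the same semantics in standard C; the helper name is ours, not a kernel API:

#include <stdint.h>

/* Portable equivalent of ld_le16() semantics: read a little-endian
 * 16-bit value from memory into host order, byte by byte. */
static inline uint16_t portable_ld_le16(const void *addr)
{
	const uint8_t *p = (const uint8_t *)addr;
	return (uint16_t)(p[0] | (p[1] << 8));	/* LSB first */
}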