Example no. 1
/*H:430
 * (iv) Switching page tables
 *
 * Now we've seen all the page table setting and manipulation, let's see
 * what happens when the Guest changes page tables (i.e. changes the top-level
 * pgdir).  This occurs on almost every context switch.
 */
void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
{
	int newpgdir, repin = 0;

	/*
	 * The very first time they call this, we're actually running without
	 * any page tables; we've been making it up.  Throw them away now.
	 */
	if (unlikely(cpu->linear_pages)) {
		release_all_pagetables(cpu->lg);
		cpu->linear_pages = false;
		/* Force allocation of a new pgdir. */
		newpgdir = ARRAY_SIZE(cpu->lg->pgdirs);
	} else {
		/* Look to see if we have this one already. */
		newpgdir = find_pgdir(cpu->lg, pgtable);
	}

	/*
	 * If not, we allocate or mug an existing one: if it's a fresh one,
	 * repin gets set to 1.
	 */
	if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
		newpgdir = new_pgdir(cpu, pgtable, &repin);
	/* Change the current pgd index to the new one. */
	cpu->cpu_pgd = newpgdir;
	/* If it was completely blank, we map in the Guest kernel stack */
	if (repin)
		pin_stack_pages(cpu);
}
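The comparison against ARRAY_SIZE(cpu->lg->pgdirs) works because find_pgdir() returns that value as its "not found" sentinel. In the lguest source the helper is essentially a linear scan over the shadow toplevels; a sketch:

static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
{
	unsigned int i;

	/* Look for a shadow whose recorded Guest toplevel matches. */
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		if (lg->pgdirs[i].pgdir && lg->pgdirs[i].gpgdir == pgtable)
			break;

	/* i == ARRAY_SIZE(lg->pgdirs) means no match was found. */
	return i;
}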
Example no. 2
/*
 * We also throw away everything when a Guest tells us it's changed a kernel
 * mapping.  Since kernel mappings are in every page table, it's easiest to
 * throw them all away.  This traps the Guest in amber for a while as
 * everything faults back in, but it's rare.
 */
void guest_pagetable_clear_all(struct lg_cpu *cpu)
{
	release_all_pagetables(cpu->lg);
	/* We need the Guest kernel stack mapped again. */
	pin_stack_pages(cpu);
	/* And we need the Switcher mapping allocated. */
	if (!allocate_switcher_mapping(cpu))
		kill_guest(cpu, "Cannot populate switcher mapping");
}
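release_all_pagetables() itself is not shown in these examples; in older lguest versions it is roughly the following, dropping every PGD entry of every shadow toplevel except the Switcher slot at the top (release_pgd() and SWITCHER_PGD_INDEX as in that source, details vary by kernel version):

static void release_all_pagetables(struct lguest *lg)
{
	unsigned int i, j;

	/* Every shadow pagetable this Guest has... */
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		if (lg->pgdirs[i].pgdir)
			/* ...every PGD entry except the Switcher at the top. */
			for (j = 0; j < SWITCHER_PGD_INDEX; j++)
				release_pgd(lg, lg->pgdirs[i].pgdir + j);
}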
Example no. 3
/* When a Guest dies, our cleanup is fairly simple. */
void free_guest_pagetable(struct lguest *lg)
{
	unsigned int i;

	/* Throw away all page table pages. */
	release_all_pagetables(lg);
	/* Now free the top levels: free_page() can handle 0 just fine. */
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		free_page((long)lg->pgdirs[i].pgdir);
}
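The loop can free every slot unconditionally because unused slots hold a NULL pgdir. A rough sketch of the per-slot bookkeeping this code assumes (the full struct carries more fields in later kernels):

struct pgdir {
	unsigned long gpgdir;	/* Guest physical address of its toplevel */
	pgd_t *pgdir;		/* our shadow page, or NULL if slot unused */
};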
Example no. 4
static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
{
	struct file *file = q->priv_data;
	struct saa7146_fh *fh = file->private_data;
	struct saa7146_dev *dev = fh->dev;
	struct saa7146_buf *buf = (struct saa7146_buf *)vb;

	DEB_CAP("vbuf:%p\n", vb);

	saa7146_dma_free(dev, q, buf);

	release_all_pagetables(dev, buf);
}
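Note that this release_all_pagetables() is unrelated to the lguest function of the same name: saa7146_video.c defines its own static helper (hence the different signature), which simply frees the buffer's three DMA page tables. It is essentially:

static void release_all_pagetables(struct saa7146_dev *dev,
				   struct saa7146_buf *buf)
{
	saa7146_pgtable_free(dev->pci, &buf->pt[0]);
	saa7146_pgtable_free(dev->pci, &buf->pt[1]);
	saa7146_pgtable_free(dev->pci, &buf->pt[2]);
}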
Example no. 5
/*H:500
 * (vii) Setting up the page tables initially.
 *
 * When a Guest is first created, we initialize a shadow page table which
 * we will populate on future faults.  The Guest doesn't have any actual
 * pagetables yet, so we set linear_pages to tell demand_page() to fake it
 * for the moment.
 *
 * We do need the Switcher to be mapped at all times, so we allocate that
 * part of the Guest page table here.
 */
int init_guest_pagetable(struct lguest *lg)
{
	struct lg_cpu *cpu = &lg->cpus[0];
	int allocated = 0;

	/* lg (and lg->cpus[]) starts zeroed: this allocates a new pgdir */
	cpu->cpu_pgd = new_pgdir(cpu, 0, &allocated);
	if (!allocated)
		return -ENOMEM;

	/* We start with a linear mapping until the Guest initializes its own page tables. */
	cpu->linear_pages = true;

	/* Allocate the page tables for the Switcher. */
	if (!allocate_switcher_mapping(cpu)) {
		release_all_pagetables(lg);
		return -ENOMEM;
	}

	return 0;
}
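To illustrate what "fake it" means: while linear_pages is set, demand_page() does not walk Guest page tables at all, it synthesizes an identity mapping on the fly. A hypothetical, much-simplified sketch of the idea (fake_linear_pte() and the flag choice are illustrative, not the kernel's actual code):

static pte_t fake_linear_pte(unsigned long vaddr)
{
	/* Pretend the Guest mapped everything virtual == physical. */
	return __pte((vaddr & PAGE_MASK) |
		     _PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED);
}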
Example no. 6
/*
 * We also throw away everything when a Guest tells us it's changed a kernel
 * mapping.  Since kernel mappings are in every page table, it's easiest to
 * throw them all away.  This traps the Guest in amber for a while as
 * everything faults back in, but it's rare.
 */
void guest_pagetable_clear_all(struct lg_cpu *cpu)
{
	release_all_pagetables(cpu->lg);
	/* We need the Guest kernel stack mapped again. */
	pin_stack_pages(cpu);
}
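For reference, pin_stack_pages() just walks the Guest's registered kernel stack and faults each page back in; in the lguest source it is roughly:

void pin_stack_pages(struct lg_cpu *cpu)
{
	unsigned int i;

	/* Pin each page below the stack pointer the Guest registered. */
	for (i = 0; i < cpu->lg->stack_pages; i++)
		pin_page(cpu, cpu->esp1 - 1 - i * PAGE_SIZE);
}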
Example no. 7
static int buffer_prepare(struct videobuf_queue *q,
			  struct videobuf_buffer *vb, enum v4l2_field field)
{
	struct file *file = q->priv_data;
	struct saa7146_fh *fh = file->private_data;
	struct saa7146_dev *dev = fh->dev;
	struct saa7146_vv *vv = dev->vv_data;
	struct saa7146_buf *buf = (struct saa7146_buf *)vb;
	int size, err = 0;

	DEB_CAP("vbuf:%p\n", vb);

	/* sanity checks */
	if (fh->video_fmt.width  < 48 ||
	    fh->video_fmt.height < 32 ||
	    fh->video_fmt.width  > vv->standard->h_max_out ||
	    fh->video_fmt.height > vv->standard->v_max_out) {
		DEB_D("w (%d) / h (%d) out of bounds\n",
		      fh->video_fmt.width, fh->video_fmt.height);
		return -EINVAL;
	}

	size = fh->video_fmt.sizeimage;
	if (0 != buf->vb.baddr && buf->vb.bsize < size) {
		DEB_D("size mismatch\n");
		return -EINVAL;
	}

	DEB_CAP("buffer_prepare [size=%dx%d,bytes=%d,fields=%s]\n",
		fh->video_fmt.width, fh->video_fmt.height,
		size, v4l2_field_names[fh->video_fmt.field]);
	if (buf->vb.width  != fh->video_fmt.width  ||
	    buf->vb.bytesperline != fh->video_fmt.bytesperline ||
	    buf->vb.height != fh->video_fmt.height ||
	    buf->vb.size   != size ||
	    buf->vb.field  != field      ||
	    buf->vb.field  != fh->video_fmt.field  ||
	    buf->fmt       != &fh->video_fmt) {
		saa7146_dma_free(dev, q, buf);
	}

	if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
		struct saa7146_format *sfmt;

		buf->vb.bytesperline  = fh->video_fmt.bytesperline;
		buf->vb.width  = fh->video_fmt.width;
		buf->vb.height = fh->video_fmt.height;
		buf->vb.size   = size;
		buf->vb.field  = field;
		buf->fmt       = &fh->video_fmt;
		buf->vb.field  = fh->video_fmt.field;

		sfmt = saa7146_format_by_fourcc(dev, buf->fmt->pixelformat);

		release_all_pagetables(dev, buf);
		if (0 != IS_PLANAR(sfmt->trans)) {
			saa7146_pgtable_alloc(dev->pci, &buf->pt[0]);
			saa7146_pgtable_alloc(dev->pci, &buf->pt[1]);
			saa7146_pgtable_alloc(dev->pci, &buf->pt[2]);
		} else {
			saa7146_pgtable_alloc(dev->pci, &buf->pt[0]);
		}

		err = videobuf_iolock(q, &buf->vb, &vv->ov_fb);
		if (err)
			goto oops;
		err = saa7146_pgtable_build(dev,buf);
		if (err)
			goto oops;
	}
	buf->vb.state = VIDEOBUF_PREPARED;
	buf->activate = buffer_activate;

	return 0;

 oops:
	DEB_D("error out\n");
	saa7146_dma_free(dev, q, buf);

	return err;
}
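One caveat in the planar branch above: the three saa7146_pgtable_alloc() calls (one DMA page table per plane) are never checked for failure. A hypothetical hardened variant, unwinding on error (alloc_planar_pagetables() is illustrative, not part of the driver API):

static int alloc_planar_pagetables(struct saa7146_dev *dev,
				   struct saa7146_buf *buf)
{
	int i;

	for (i = 0; i < 3; i++) {
		if (saa7146_pgtable_alloc(dev->pci, &buf->pt[i])) {
			/* Free the page tables already allocated. */
			while (--i >= 0)
				saa7146_pgtable_free(dev->pci, &buf->pt[i]);
			return -ENOMEM;
		}
	}
	return 0;
}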