Example No. 1
void
putmmu(ulong va, ulong pa, Page *)
{
	ulong *l1a, *l1b, *l2;
	int l1o, l2o;
	
	l1o = va / MiB;			/* index of the 1MB section */
	l2o = (va % MiB) / BY2PG;	/* page index within the section */
	l1a = KADDR(L1PT);		/* live L1 table */
	l1b = up->l1;			/* per-process copy of the user entries */
	if(l1a[l1o] == 0){
		if((pa & PTEVALID) == 0)
			return;
		l2 = xspanalloc(L2SIZ, L2SIZ, 0);	/* allocate a coarse L2 table */
		l1a[l1o] = l1b[l1o] = PADDR(l2) | Coarse;
	} else
		l2 = KADDR(ROUNDDN(l1a[l1o], L2SIZ));
	l2 += l2o;
	if((pa & PTEVALID) == 0){
		*l2 = 0;
		flushtlb();
		return;
	}
	*l2 = ROUNDDN(pa, BY2PG) | Small;
	if((pa & PTEWRITE) == 0)
		*l2 |= L2AP(Uro);
	else
		*l2 |= L2AP(Urw);
	if((pa & PTEUNCACHED) == 0)
		*l2 |= Buffered | Cached;
	flushtlb();
}
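For reference, a minimal sketch of the ARM short-descriptor constants that putmmu() relies on. The names follow the code above; the values are assumptions based on the ARMv6 page-table format, not taken from this port's headers.

enum {
	Coarse   = 1<<0,	/* L1: coarse page-table descriptor */
	Small    = 2<<0,	/* L2: 4K small-page descriptor */
	Buffered = 1<<2,	/* L2: write buffer enable */
	Cached   = 1<<3,	/* L2: cache enable */

	Krw = 1,	/* AP: kernel read/write, no user access */
	Uro = 2,	/* AP: user read-only */
	Urw = 3,	/* AP: user read/write */
};
/* replicate an AP value into all four AP fields of a small-page entry */
#define L2AP(ap)	((ap)<<4 | (ap)<<6 | (ap)<<8 | (ap)<<10)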
Example No. 2
File: vm.c Project: TBReinhart/os_4
// Returns 0 on error, 1 on success
int check_page_fault(pde_t *pgdir, uint va) {
    pte_t *pte;
    uint pa;
    char *mem;

    // check that the faulting address is valid and user-accessible
    if(va >= KERNBASE || va < 4096) {
        cprintf("Kernel or Null memory access\n");
        return 0;
    }
    if((pte = walkpgdir(pgdir, (void *)va, 0)) == 0) {
        cprintf("memory access not in page dir\n");
        return 0;
    }
    if(!(*pte & PTE_P) || !(*pte & PTE_U)) {
        cprintf("page not present or not user-accessible\n");
        return 0;
    }
    if(!(*pte & PTE_COW)) {
        cprintf("no COW bit: write to genuinely read-only memory\n");
        return 0;
    }
    if(*pte & PTE_W) {
        cprintf("page already writable; fault should not happen\n");
        return 0;
    }
    pa = PTE_ADDR(*pte);
    //CHANGE: update reference counts
    acquire(&r_c.lock);
    if(r_c.ref_count[pa / 4096] == 1) {
        // sole owner: make the page writable again in place
        *pte = *pte | PTE_W;
        *pte = *pte & (~PTE_COW);
        release(&r_c.lock);
        //flush translation lookaside buffer
        flushtlb();
        return 1;
    } else {
        // page is shared: drop our reference and take a private copy
        r_c.ref_count[pa / 4096]--;
        release(&r_c.lock);
        if((mem = kalloc()) == 0) {
            return 0;
        }
        memmove(mem, (char*)p2v(pa), PGSIZE);
        *pte = v2p(mem) | PTE_FLAGS(*pte) | PTE_W;
        *pte = *pte & (~PTE_COW);
        acquire(&r_c.lock);
        r_c.ref_count[v2p(mem) / 4096] = 1;
        release(&r_c.lock);
        //flush translation lookaside buffer
        flushtlb();
        return 1;
    }
}
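Both this handler and copyuvm below rely on a shared reference-count table, r_c, whose definition is not part of the snippet. A minimal sketch consistent with the accesses above: one counter per 4096-byte frame up to xv6's PHYSTOP, guarded by a spinlock.

struct {
    struct spinlock lock;
    uint ref_count[PHYSTOP / 4096];   // indexed by physical page number
} r_c;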
Example No. 3
File: vm.c Project: TBReinhart/os_4
// Given a parent process's page table, create a copy
// of it for a child. 
// CHANGE: copy on write: share pages read-only instead of duplicating them
pde_t*
copyuvm(pde_t *pgdir, uint sz)
{
  pde_t *d;
  pte_t *pte;
  uint pa, i, flags;
  if((d = setupkvm()) == 0)
    return 0;
  // CHANGE: start at 4096 so page 0 stays unmapped (catches null dereferences)
  for(i = 4096; i < sz; i += PGSIZE){
    if((pte = walkpgdir(pgdir, (void *) i, 0)) == 0)
      panic("copyuvm: pte should exist");
    if(!(*pte & PTE_P))
      panic("copyuvm: page not present");

    // mark the parent's page read-only and COW; parent and child now share it
    *pte = *pte & (~PTE_W);
    *pte = *pte | PTE_COW;
    pa = PTE_ADDR(*pte);
    flags = PTE_FLAGS(*pte);

    //CHANGE: update reference counts
    if(mappages(d, (void*)i, PGSIZE, pa, flags) < 0)
      goto bad;
    r_c.ref_count[pa / 4096]++;
  }
  flushtlb();
  return d;

bad:
  freevm(d);
  return 0;
}
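flushtlb() is also not shown in these xv6 snippets. On x86, reloading %cr3 invalidates all non-global TLB entries, so a plausible implementation, assuming proc names the current process as in the xv6 revision these snippets match, is simply:

static void
flushtlb(void)
{
    lcr3(v2p(proc->pgdir));   // reloading cr3 flushes the TLB on x86
}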
Example No. 4
void
mmuinit(void)
{
	ulong *l1, l2, *pl2;
	int i, n;
	extern ulong *uart;

	l1 = KADDR(L1PT);
	l2 = IOPT;
	n = NIOPAGES / 256;	/* one coarse table maps 256 pages */
	memset(KADDR(l2), 0, n * L2SIZ);
	for(i = 0; i < n; i++){
		l1[(IZERO / MiB) + i] = l2 | Coarse;	/* map the I/O window at IZERO */
		l2 += L2SIZ;
	}
	uart = vmap((ulong) uart, BY2PG);
	periph = vmap(0x48240000, 2 * BY2PG);
	memset(l1, 0, sizeof(ulong) * (IZERO / MiB));	/* clear user mappings */
	l1[4095] = PRIVL2 | Coarse;	/* top MB: kernel-private pages */
	pl2 = KADDR(PRIVL2);
	for(i = 0; i < 240; i++)
		pl2[i] = (0x8FF00000 + i * BY2PG) | L2AP(Krw) | Small | Cached | Buffered;
	pl2[240] = PHYSVECTORS | L2AP(Krw) | Small | Cached | Buffered;
	pl2[241] = FIRSTMACH | L2AP(Krw) | Small | Cached | Buffered;
	flushtlb();
	m = (Mach *) MACHADDR;
}
Example No. 5
void
mmuswitch(Proc *p)
{
	ulong *l1;
	
	l1 = KADDR(L1PT);
	memmove(l1, p->l1, sizeof p->l1);
	flushtlb();
}
Example No. 6
void
flushmmu(void)
{
	int s, i;
	ulong p;
	ulong *l1;

	l1 = KADDR(L1PT);
	s = splhi();
	for(i = 0; i < nelem(up->l1); i++){
		p = l1[i];
		if(p & Coarse)	/* user L2 tables are entered as Coarse; see putmmu */
			free(KADDR(ROUNDDN(p, L2SIZ)));
	}
	memset(up->l1, 0, sizeof up->l1);
	memset(l1, 0, sizeof up->l1);
	flushtlb();
	splx(s);
}
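mmuswitch() and flushmmu() assume each Proc carries a private copy of the user portion of the L1 table, one descriptor per MiB section below IZERO. A sketch of that field; the bound is an assumption inferred from the memset in mmuinit.

struct Proc {
	/* ... */
	ulong	l1[IZERO / MiB];	/* per-process user L1 descriptors */
	/* ... */
};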
Example No. 7
void *
vmap(ulong phys, ulong length)
{
	ulong virt, off, *l2;

	off = phys % BY2PG;	/* byte offset within the first page */
	length = (ROUNDUP(phys + length, BY2PG) - ROUNDDN(phys, BY2PG)) / BY2PG;	/* pages */
	if(length == 0)
		return nil;
	phys = ROUNDDN(phys, BY2PG);
	virt = getiopages(length);
	l2 = KADDR(IOPT);
	l2 += virt;
	while(length--){
		*l2++ = phys | L2AP(Krw) | Small | PTEIO;
		phys += BY2PG;
	}
	flushtlb();
	return (void *) (IZERO + BY2PG * virt + off);
}
Example No. 8
void
vunmap(void *virt, ulong length)
{
	ulong v, *l2;
	
	if((ulong)virt < IZERO || (ulong)virt >= IZERO + NIOPAGES * BY2PG)
		panic("vunmap: virt=%p", virt);
	v = (ROUNDDN((ulong) virt, BY2PG) - IZERO) / BY2PG;
	length = (ROUNDUP(((ulong) virt) + length, BY2PG) - ROUNDDN((ulong) virt, BY2PG)) / BY2PG;
	if(length == 0)
		return;
	l2 = KADDR(IOPT);
	l2 += v;
	lock(&iopagelock);
	while(length--){
		*l2++ = 0;
		freeio(v++);
	}
	unlock(&iopagelock);
	flushtlb();
}
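A hypothetical use of the vmap()/vunmap() pair, mapping a page of device registers, touching one, and releasing the mapping; the physical base is purely illustrative.

ulong *regs;

regs = vmap(0x48020000, BY2PG);	/* made-up peripheral base */
regs[0] = 1;			/* poke a register through the new mapping */
vunmap(regs, BY2PG);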
Example No. 9
int nugpgpu_ringbuffer_render_init(struct nugpgpu_private *gpu_priv)
{
  int ret;
  u32 head;
  u32 temp;   /* scratch word used by the dword_check() macro below */

  printk(LOG_INFO "nugpgpu_ringbuffer_render_init\n" LOG_END);
  TRACE_IN

  RING->mmio_base = RENDER_RING_BASE;
  RING->size = PAGE_SIZE * RING_PAGES;

  /* Allocate the status page. */
  ret = allocate_object(gpu_priv, &RING->status_obj, 1);
  if (ret){
    printk(LOG_ERR "Failed to allocate the status page\n" LOG_END);
    return 1;
  }

  RING->gva_status = nugpgpu_gtt_insert(gpu_priv, RING->status_obj.pg_list, 
                                        NUGPGPU_CACHE_LLC);
  if (RING->gva_status == (unsigned int)-1){
    printk(LOG_ERR "Failed to insert the status page in gtt\n" LOG_END);
    return 1;
  }

  printk(LOG_INFO "RING->gva_status : 0x%x\n" LOG_END, (unsigned int) RING->gva_status);

  RING->page_status = kmap(sg_page(RING->status_obj.pg_list->sgl));
  if (RING->page_status == NULL) {
    printk(LOG_ERR "Failed to map page_status\n" LOG_END);
    return 1;
  }
  memset(RING->page_status, 0, PAGE_SIZE);
  printk(LOG_INFO "RING->page_status : 0x%lx\n" LOG_END, (unsigned long) RING->page_status);

  /* Allocate the ringbuffer object */
  ret = allocate_object(gpu_priv, &RING->ringbuf_obj, RING_PAGES);
  if (ret){
    printk(LOG_ERR "Failed to allocate the status page\n" LOG_END);
    return 1;
  }

  RING->gva_ringbuffer = nugpgpu_gtt_insert(gpu_priv, RING->ringbuf_obj.pg_list, 
                                            NUGPGPU_CACHE_LLC);
  if (RING->gva_ringbuffer == (unsigned int)-1){
    printk(LOG_ERR "Failed to insert the status page in gtt\n" LOG_END);
    return 1;
  }

  printk(LOG_INFO "RING->gva_ringbuffer : 0x%x\n" LOG_END, (unsigned int) RING->gva_ringbuffer);

  RING->page_ringbuffer = kmap(sg_page(RING->ringbuf_obj.pg_list->sgl));
  if (RING->page_ringbuffer == NULL) {
    printk(LOG_ERR "Failed to map page_ringbuffer\n" LOG_END);
    return 1;
  }

  RING->virtual_start = ioremap_wc(gpu_priv->gtt.mappable_base + PAGE_SIZE, RING->size);
  if (RING->virtual_start == NULL) {
    printk(LOG_ERR "Problem while mapping virtual start ioremap_wc\n" LOG_END);
    return 1;
  }

  printk(LOG_INFO "Allocated the ringbuffer\n" LOG_END);

  /* Initialize the ring now. */

  gpu_forcewake_get(gpu_priv);

  /* Write status page register */
  printk(LOG_INFO "writing status page register\n" LOG_END);

  NUGPGPU_WRITE(RENDER_HWS_PGA_GEN7, RING->gva_status);
  NUGPGPU_READ(RENDER_HWS_PGA_GEN7);

  flushtlb(gpu_priv);

  // Stop ring
  printk(LOG_INFO "stopping ring\n" LOG_END);

  RING_WRITE_CTL(RING, 0);
  RING_WRITE_HEAD(RING, 0);
  RING_WRITE_TAIL(RING, 0);

  // Per the docs, reading HEAD back enforces ordering between the writes above
  head = RING_READ_HEAD(RING) & RING_HEAD_ADDR;
  if (head != 0) {
    printk(LOG_ERR "failed to set head to zero\n" LOG_END);
    RING_WRITE_HEAD(RING, 0);

    if (RING_READ_HEAD(RING) & RING_HEAD_ADDR) {
      printk(LOG_ERR "failed to set ring head to zero "
                     "ctl %08x head %08x tail %08x start %08x\n"
             LOG_END,
             RING_READ_CTL(RING),
             RING_READ_HEAD(RING),
             RING_READ_TAIL(RING),
             RING_READ_START(RING));
    }
  }

  /* As in the i915 driver: enforce ordering by reading the HEAD register back */
  RING_READ_HEAD(RING);

  /* Comment taken directly from i915 driver */
  /* Initialize the ring. This must happen _after_ we've cleared the ring
   * registers with the above sequence (the readback of the HEAD registers
   * also enforces ordering), otherwise the hw might lose the new ring
   * register values. */
  RING_WRITE_START(RING, RING->gva_ringbuffer);

  RING_WRITE_CTL(RING, (((RING->size - PAGE_SIZE) &
                          RING_NR_PAGES) |
                          RING_VALID));

  /* If the head is still not zero, the ring is dead */
  if (wait_for((RING_READ_CTL(RING) & RING_VALID) != 0 &&
               RING_READ_START(RING) == RING->gva_ringbuffer &&
               (RING_READ_HEAD(RING) & RING_HEAD_ADDR) == 0, 50)) {
    printk(LOG_ERR "failed to start ring\n" LOG_END);
    return -EIO;
  }

  RING->head = RING_READ_HEAD(RING);
  RING->tail = RING_READ_TAIL(RING) & RING_TAIL_ADDR;
  RING->space = ring_space(RING);

  printk(LOG_INFO "ring->space = %d\n" LOG_END, RING->space);

  gpu_forcewake_put(gpu_priv);

  RING_WRITE_MODE(RING, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
  RING_WRITE_MODE(RING, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
  RING_WRITE_MODE_GEN7(RING, _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
                       _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
  RING_WRITE_INSTPM(RING, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

  dword_check(gpu_priv, RING, temp);

  TRACE_OUT
  return 0;
}
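ring_space() is used above but not shown. In the spirit of the i915 driver it would return the free bytes between tail and head, wrapping around the buffer; a sketch under that assumption (the struct name and the safety gap are guesses, not from the original source):

static int ring_space(struct nugpgpu_ring *ring)
{
  int space = ring->head - (ring->tail + 8);   /* assumed safety gap */
  if (space < 0)
    space += ring->size;   /* wrap around to the end of the buffer */
  return space;
}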