/*
 * Allocate upper memory block (UMB) space that must be writable.
 * First try the dedicated read/write UMB map; failing that, take a
 * chunk from the ordinary UMB map and probe it by writing 0xCC to
 * the first and last bytes and reading them back, restoring the
 * original contents on success.
 * Returns a kernel virtual address, or 0 on failure.
 */
ulong
umbrwmalloc(ulong addr, int size, int align)
{
	uchar save[2], *p;
	ulong a;

	a = mapalloc(&rmapumbrw, addr, size, align);
	if(a != 0)
		return (ulong)KADDR(a);

	/*
	 * Perhaps the memory wasn't visible before
	 * the interface is initialised, so try again.
	 */
	a = umbmalloc(addr, size, align);
	if(a == 0)
		return 0;

	/*
	 * Probe both ends of the chunk; keep the interleaved
	 * save/write order so a size-1 chunk behaves as before.
	 */
	p = (uchar*)a;
	save[0] = p[0];
	p[0] = 0xCC;
	save[1] = p[size-1];
	p[size-1] = 0xCC;
	if(p[0] == 0xCC && p[size-1] == 0xCC){
		p[0] = save[0];
		p[size-1] = save[1];
		return a;
	}

	/* not writable after all; give the chunk back */
	umbfree(a, size);
	return 0;
}
/*
 * Allocate unassigned physical address (UPA) space and map it
 * into the kernel via mmukmap.
 * Note carefully: the return value is a PHYSICAL address,
 * or 0 if the allocation failed.
 */
ulong
upamalloc(ulong addr, int size, int align)
{
	ulong a, e;

	USED(align);	/* vestigial: align is in fact passed to mapalloc */
	a = mapalloc(&rmapupa, addr, size, align);
	if(a == 0){
		memdebug();
		return 0;
	}

	/*
	 * This is a travesty, but they all are.
	 */
	e = mmukmap(a, 0, size);

	/*
	 * Should check here that it was all delivered
	 * and put it back and barf if not.
	 */
	USED(e);

	/*
	 * Be very careful this returns a PHYSICAL address.
	 */
	return a;
}
/*
 * Remove regions named by the "umbexclude" boot configuration
 * variable from the upper memory block map, so they are never
 * handed out by umbmalloc.  The syntax is a comma-separated
 * list of inclusive ranges, e.g.
 *	umbexclude=0xD0000-0xD3FFF,0xE0000-0xE7FFF
 * Parsing stops at the first malformed entry.
 */
static void
umbexclude(void)
{
	int size;
	ulong addr;
	char *op, *p, *rptr;

	if((p = getconf("umbexclude")) == nil)
		return;

	while(p && *p != '\0' && *p != '\n'){
		op = p;
		addr = strtoul(p, &rptr, 0);
		if(rptr == nil || rptr == p || *rptr != '-'){
			print("umbexclude: invalid argument <%s>\n", op);
			break;
		}
		p = rptr+1;

		size = strtoul(p, &rptr, 0) - addr + 1;
		/*
		 * The end of the range must actually parse (rptr
		 * advances) and must not precede the start; without
		 * the rptr check, "0-" would silently reserve one
		 * byte at address 0.
		 */
		if(rptr == p || size <= 0){
			print("umbexclude: bad range <%s>\n", op);
			break;
		}
		if(rptr != nil && *rptr == ',')
			*rptr++ = '\0';
		p = rptr;

		/* take the range out of the UMB pool for good */
		mapalloc(&rmapumb, addr, size, 0);
	}
}
/*
 * Allocate a chunk of upper memory block (UMB) space.
 * Returns a kernel virtual address, or 0 if nothing suitable
 * is available.
 */
ulong
umbmalloc(ulong addr, int size, int align)
{
	ulong pa;

	pa = mapalloc(&rmapumb, addr, size, align);
	if(pa == 0)
		return 0;
	return (ulong)KADDR(pa);
}
/*
 * Allocate a chunk of upper memory block (UMB) space.
 * Returns a kernel virtual address, or 0 if nothing suitable
 * is available.
 */
uint32_t
umbmalloc(uint32_t addr, int size, int align)
{
	uint32_t pa;

	pa = mapalloc(&rmapumb, addr, size, align);
	if(pa == 0)
		return 0;
	return (uint32_t)KADDR(pa);
}
/*
 * Allocate from the ram map directly to make page tables.
 * Called by mmuwalk during e820scan.
 * Returns a kernel virtual pointer to one page, or nil if the
 * ram map is exhausted.
 */
void*
rampage(void)
{
	ulong pa;

	pa = mapalloc(&rmapram, 0, BY2PG, BY2PG);
	if(pa == 0)
		return nil;
	return KADDR(pa);
}
/*
 * Allocate n contiguous buffer descriptors from the BD map.
 * Panics if the map is exhausted; returns a kernel virtual
 * pointer to the descriptors.
 */
BD*
bdalloc(int n)
{
	ulong pa;

	pa = mapalloc(&bdmap, 0, n*sizeof(BD), 0);
	if(pa == 0)
		panic("bdalloc");
	return KADDR(pa);
}
/*
 * Give out otherwise-unused physical address space
 * for use in configuring devices. Note that unlike upamalloc
 * before it, upaalloc does not map the physical address
 * into virtual memory. Call vmap to do that.
 * Returns the physical address, or 0 on exhaustion (after
 * dumping the map for diagnosis).
 */
ulong
upaalloc(int size, int align)
{
	ulong pa;

	pa = mapalloc(&rmapupa, 0, size, align);
	if(pa == 0){
		print("out of physical address space allocating %d\n", size);
		mapprint(&rmapupa);
	}
	return pa;
}
/*
 * Try to remove [pa, pa+size) from the unassigned physical
 * address map so it is never handed out again.  Failure is
 * tolerated silently.
 */
void
upareserve(ulong pa, int size)
{
	ulong got;

	got = mapalloc(&rmapupa, pa, size, 0);
	if(got == pa)
		return;

	/*
	 * This can happen when we're using the E820
	 * map, which might have already reserved some
	 * of the regions claimed by the pci devices.
	 */
	// print("upareserve: cannot reserve pa=%#.8lux size=%d\n", pa, size);
	if(got != 0)
		mapfree(&rmapupa, got, size);
}
/*
 * Allocate upper memory block (UMB) space that must be writable.
 * First try the dedicated read/write UMB map; failing that, take a
 * chunk from the ordinary UMB map and probe it by writing 0xCC to
 * the first and last bytes and reading them back.
 * Returns a kernel virtual address, or 0 on failure.
 */
uint32_t
umbrwmalloc(uint32_t addr, int size, int align)
{
	uint32_t a;
	uint8_t o[2], *p;

	if(a = mapalloc(&rmapumbrw, addr, size, align))
		return (uint32_t)KADDR(a);

	/*
	 * Perhaps the memory wasn't visible before
	 * the interface is initialised, so try again.
	 */
	if((a = umbmalloc(addr, size, align)) == 0)
		return 0;

	/*
	 * Save the probed bytes and restore them on success,
	 * like the other variant of this routine, instead of
	 * leaving 0xCC behind in the chunk we hand out.
	 */
	p = (uint8_t*)a;
	o[0] = p[0];
	p[0] = 0xCC;
	o[1] = p[size-1];
	p[size-1] = 0xCC;
	if(p[0] == 0xCC && p[size-1] == 0xCC){
		p[0] = o[0];
		p[size-1] = o[1];
		return a;
	}
	umbfree(a, size);
	return 0;
}
/*
 * Probe physical memory up to maxmem (or a CMOS/heuristic limit
 * when maxmem is 0), 1MB at a time, sorting each chunk into the
 * ram, UMB or UPA resource maps.  Builds page tables on the fly
 * from low memory via mapalloc(&rmapram, ...).
 */
static void
ramscan(ulong maxmem)
{
	ulong *k0, kzero, map, maxkpa, maxpa, pa, *pte, *table, *va, vbase, x;
	int nvalid[NMemType];

	/*
	 * The bootstrap code has created a prototype page
	 * table which maps the first MemMin of physical memory to KZERO.
	 * The page directory is at m->pdb and the first page of
	 * free memory is after the per-processor MMU information.
	 */
	pa = MemMin;

	/*
	 * Check if the extended memory size can be obtained from the CMOS.
	 * If it's 0 then it's either not known or >= 64MB. Always check
	 * at least 24MB in case there's a memory gap (up to 8MB) below 16MB;
	 * in this case the memory from the gap is remapped to the top of
	 * memory.
	 * The value in CMOS is supposed to be the number of KB above 1MB.
	 */
	if(maxmem == 0){
		x = (nvramread(0x18)<<8)|nvramread(0x17);
		if(x == 0 || x >= (63*KB))
			maxpa = MemMax;
		else
			maxpa = MB+x*KB;
		if(maxpa < 24*MB)
			maxpa = 24*MB;
	}else
		maxpa = maxmem;
	maxkpa = (u32int)-KZERO;	/* 2^32 - KZERO: highest pa KADDR can reach */

	/*
	 * March up memory from MemMin to maxpa 1MB at a time,
	 * mapping the first page and checking the page can
	 * be written and read correctly. The page tables are created here
	 * on the fly, allocating from low memory as necessary.
	 */
	k0 = (ulong*)KADDR(0);
	kzero = *k0;			/* saved; restored at the end of the scan */
	map = 0;			/* current scratch page-table page, if any */
	x = 0x12345678;			/* test pattern, perturbed every 4MB below */
	memset(nvalid, 0, sizeof(nvalid));

	/*
	 * Can't map memory to KADDR(pa) when we're walking because
	 * can only use KADDR for relatively low addresses.
	 * Instead, map each 4MB we scan to the virtual address range
	 * MemMin->MemMin+4MB while we are scanning.
	 */
	vbase = MemMin;
	while(pa < maxpa){
		/*
		 * Map the page. Use mapalloc(&rmapram, ...) to make
		 * the page table if necessary, it will be returned to the
		 * pool later if it isn't needed. Map in a fixed range (the second 4M)
		 * because high physical addresses cannot be passed to KADDR.
		 */
		va = (void*)(vbase + pa%(4*MB));
		table = &m->pdb[PDX(va)];
		if(pa%(4*MB) == 0){
			/* new 4MB chunk: need a fresh, zeroed page table */
			if(map == 0 && (map = mapalloc(&rmapram, 0, BY2PG, BY2PG)) == 0)
				break;
			memset(KADDR(map), 0, BY2PG);
			*table = map|PTEWRITE|PTEVALID;
			memset(nvalid, 0, sizeof(nvalid));
		}
		table = KADDR(PPN(*table));
		pte = &table[PTX(va)];

		/* probe uncached so the write really reaches the chip */
		*pte = pa|PTEWRITE|PTEUNCACHED|PTEVALID;
		mmuflushtlb(PADDR(m->pdb));

		/*
		 * Write a pattern to the page and write a different
		 * pattern to a possible mirror at KZERO. If the data
		 * reads back correctly the chunk is some type of RAM (possibly
		 * a linearly-mapped VGA framebuffer, for instance...) and
		 * can be cleared and added to the memory pool. If not, the
		 * chunk is marked uncached and added to the UMB pool if <16MB
		 * or is marked invalid and added to the UPA pool.
		 */
		*va = x;
		*k0 = ~x;
		if(*va == x){
			nvalid[MemRAM] += MB/BY2PG;
			mapfree(&rmapram, pa, MB);
			do{
				*pte++ = pa|PTEWRITE|PTEVALID;
				pa += BY2PG;
			}while(pa % MB);
			mmuflushtlb(PADDR(m->pdb));
			/* memset(va, 0, MB); so damn slow to memset all of memory */
		}
		else if(pa < 16*MB){
			nvalid[MemUMB] += MB/BY2PG;
			mapfree(&rmapumb, pa, MB);
			do{
				*pte++ = pa|PTEWRITE|PTEUNCACHED|PTEVALID;
				pa += BY2PG;
			}while(pa % MB);
		}
		else{
			nvalid[MemUPA] += MB/BY2PG;
			mapfree(&rmapupa, pa, MB);

			*pte = 0;
			pa += MB;
		}

		/*
		 * Done with this 4MB chunk, review the options:
		 * 1) not physical memory and >=16MB - invalidate the PDB entry;
		 * 2) physical memory - use the 4MB page extension if possible;
		 * 3) not physical memory and <16MB - use the 4MB page extension
		 *    if possible;
		 * 4) mixed or no 4MB page extension - commit the already
		 *    initialised space for the page table.
		 */
		if(pa%(4*MB) == 0 && pa >= 32*MB && nvalid[MemUPA] == (4*MB)/BY2PG){
			/*
			 * If we encounter a 4MB chunk of missing memory
			 * at a sufficiently high offset, call it the end of
			 * memory. Otherwise we run the risk of thinking
			 * that video memory is real RAM.
			 */
			break;
		}
		if(pa <= maxkpa && pa%(4*MB) == 0){
			table = &m->pdb[PDX(KADDR(pa - 4*MB))];
			if(nvalid[MemUPA] == (4*MB)/BY2PG)
				*table = 0;
			/* cpuiddx bit 3 (PSE) => 4MB pages available */
			else if(nvalid[MemRAM] == (4*MB)/BY2PG && (m->cpuiddx & 0x08))
				*table = (pa - 4*MB)|PTESIZE|PTEWRITE|PTEVALID;
			else if(nvalid[MemUMB] == (4*MB)/BY2PG && (m->cpuiddx & 0x08))
				*table = (pa - 4*MB)|PTESIZE|PTEWRITE|PTEUNCACHED|PTEVALID;
			else{
				/* mixed chunk: keep the per-page table we built */
				*table = map|PTEWRITE|PTEVALID;
				map = 0;
			}
		}
		mmuflushtlb(PADDR(m->pdb));
		x += 0x3141526;		/* vary the pattern per chunk */
	}

	/*
	 * If we didn't reach the end of the 4MB chunk, that part won't
	 * be mapped. Commit the already initialised space for the page table.
	 */
	if(pa % (4*MB) && pa <= maxkpa){
		m->pdb[PDX(KADDR(pa))] = map|PTEWRITE|PTEVALID;
		map = 0;
	}
	if(map)
		mapfree(&rmapram, map, BY2PG);

	/* tear down the scratch window at vbase and restore *k0 */
	m->pdb[PDX(vbase)] = 0;
	mmuflushtlb(PADDR(m->pdb));

	/* everything above the scan limit is unassigned address space */
	mapfree(&rmapupa, pa, (u32int)-pa);
	*k0 = kzero;
}