/*
 * Allocate and initialise a virtio Vqueue of the given size.
 * The queue header (with a trailing array of size void* slots) comes
 * from malloc; the descriptor table and rings live in page-aligned
 * memory from mallocalign, laid out as two page-rounded regions:
 * desc[] + avail ring (+ used_event word), then, starting on the next
 * page boundary, the used ring (+ avail_event word).
 * Returns nil (freeing both allocations) on allocation failure.
 */
static Vqueue*
mkvqueue(int size)
{
	Vqueue *q;
	uchar *p;
	int i;

	/* queue struct plus one void* slot per descriptor */
	q = malloc(sizeof(*q) + sizeof(void*)*size);
	/* two page-rounded regions: desc+avail, then used */
	p = mallocalign(
		PGROUND(sizeof(Vdesc)*size + sizeof(Vring) + sizeof(u16int)*size + sizeof(u16int)) +
		PGROUND(sizeof(Vring) + sizeof(Vused)*size + sizeof(u16int)),
		BY2PG, 0, 0);
	if(p == nil || q == nil){
		print("virtio: no memory for Vqueue\n");
		free(p);
		free(q);
		return nil;
	}

	/* carve up the first region: descriptors, then the avail ring */
	q->desc = (void*)p;
	p += sizeof(Vdesc)*size;
	q->avail = (void*)p;
	p += sizeof(Vring);
	q->availent = (void*)p;
	p += sizeof(u16int)*size;
	q->availevent = (void*)p;
	p += sizeof(u16int);

	/* the used ring starts on its own page boundary */
	p = (uchar*)PGROUND((ulong)p);
	q->used = (void*)p;
	p += sizeof(Vring);
	q->usedent = (void*)p;
	p += sizeof(Vused)*size;
	q->usedevent = (void*)p;

	/* thread every descriptor onto the free list */
	q->free = -1;
	q->nfree = q->size = size;
	for(i=0; i<size; i++){
		q->desc[i].next = q->free;
		q->free = i;
	}

	return q;
}
/*
 * Compute the memory configuration for a fixed 128MB machine:
 * everything between the page-rounded end of the kernel and the top
 * of memory becomes allocatable pages, half of the pool share going
 * to ialloc.
 * NOTE(review): getramsize is called both before and after topofmem
 * is forced to 128*MB; one of the two calls (or the assignment) is
 * presumably redundant - confirm which is intended before removing.
 */
void
confinit(void)
{
	ulong base;

	getramsize(&conf);
	conf.topofmem = 128*MB;
	getramsize(&conf);

	/* first free page after the kernel image */
	base = PGROUND((ulong)end);
	conf.base0 = base;

	conf.npage1 = 0;
	conf.npage0 = (conf.topofmem - base)/BY2PG;
	conf.npage = conf.npage0 + conf.npage1;
	/* half of the kernel's percentage share of pages goes to ialloc */
	conf.ialloc = (((conf.npage*(main_pool_pcnt))/100)/2)*BY2PG;

	conf.nproc = 100 + ((conf.npage*BY2PG)/MB)*5;
	conf.nmach = 1;

	active.machs = 1;
	active.exiting = 0;

	print("Conf: top=%lud, npage0=%lud, ialloc=%lud, nproc=%lud\n",
		conf.topofmem, conf.npage0, conf.ialloc, conf.nproc);
}
/*
 * Hand conventional (sub-640KB) RAM and the gap between the end of
 * the kernel image and MemMin to the physical RAM allocator, zeroing
 * both ranges.
 */
static void
lowraminit(void)
{
	ulong n, pa, x;
	uchar *bda;

	/*
	 * Initialise the memory bank information for conventional memory
	 * (i.e. less than 640KB). The base is the first location after the
	 * bootstrap processor MMU information and the limit is obtained from
	 * the BIOS data area.
	 */
	x = PADDR(CPU0END);
	bda = (uchar*)KADDR(0x400);
	/* BDA bytes 0x13-0x14: base memory size in KB */
	n = ((bda[0x14]<<8)|bda[0x13])*KB-x;
	mapfree(&rmapram, x, n);
	memset(KADDR(x), 0, n);			/* keep us honest */

	/* pages between the page-rounded kernel end and MemMin */
	x = PADDR(PGROUND((ulong)end));
	pa = MemMin;
	if(x > pa)
		panic("kernel too big");
	mapfree(&rmapram, x, pa-x);
	memset(KADDR(x), 0, pa-x);		/* keep us honest */
}
/*
 * syssegfree(va, len): release the physical pages wholly contained in
 * [va, va+len) within the segment holding va.  The start is rounded
 * up and the end rounded down to page boundaries, so partial pages at
 * either edge survive.  Returns 0; raises Ebadarg on overflow or a
 * range extending past the segment top.
 */
uintptr
syssegfree(va_list list)
{
	Segment *s;
	uintptr from, to;

	from = va_arg(list, uintptr);
	/* NOTE(review): length fetched as ulong - presumably matches the
	 * user-side argument width in this port's syscall ABI; confirm. */
	to = va_arg(list, ulong);
	to += from;
	if(to < from)
		error(Ebadarg);
	/* seg() returns the containing segment locked */
	s = seg(up, from, 1);
	if(s == nil)
		error(Ebadarg);
	/* round the range inward to whole pages */
	to &= ~(BY2PG-1);
	from = PGROUND(from);
	if(from >= to) {
		/* nothing page-aligned left to free */
		qunlock(s);
		return 0;
	}
	if(to > s->top) {
		qunlock(s);
		error(Ebadarg);
	}
	mfreeseg(s, from, (to - from) / BY2PG);
	qunlock(s);
	/* the freed pages must disappear from the MMU */
	flushmmu();
	return 0;
}
/*
 * Build the global page allocator: one Page structure per allocatable
 * physical page, all linked into a single doubly-linked free list with
 * cache colors assigned round-robin.  Also sets the swap high-water
 * marks and prints a memory summary banner.
 */
void
pageinit(void)
{
	int color, i, j;
	Page *p;
	Pallocmem *pm;
	ulong m, np, k, vkb, pkb;

	/* total pages over all banks */
	np = 0;
	for(i=0; i<nelem(palloc.mem); i++){
		pm = &palloc.mem[i];
		np += pm->npage;
	}
	palloc.pages = xalloc(np*sizeof(Page));
	if(palloc.pages == 0)
		panic("pageinit");

	/* link every page into one free list, coloring round-robin */
	color = 0;
	palloc.head = palloc.pages;
	p = palloc.head;
	for(i=0; i<nelem(palloc.mem); i++){
		pm = &palloc.mem[i];
		for(j=0; j<pm->npage; j++){
			p->prev = p-1;
			p->next = p+1;
			p->pa = pm->base+j*BY2PG;
			p->color = color;
			palloc.freecount++;
			color = (color+1)%NCOLOR;
			p++;
		}
	}
	/* fix up the list ends */
	palloc.tail = p - 1;
	palloc.head->prev = 0;
	palloc.tail->next = 0;

	palloc.user = p - palloc.pages;
	pkb = palloc.user*BY2PG/1024;		/* physical user memory, KB */
	vkb = pkb + (conf.nswap*BY2PG)/1024;	/* plus swap: virtual, KB */

	/* Paging numbers */
	swapalloc.highwater = (palloc.user*5)/100;
	swapalloc.headroom = swapalloc.highwater + (swapalloc.highwater/4);

	/* total configured memory, for the banner */
	m = 0;
	for(i=0; i<nelem(conf.mem); i++)
		if(conf.mem[i].npage)
			m += conf.mem[i].npage*BY2PG;
	/* kernel image size (text + data), page rounded */
	k = PGROUND(end - (char*)KTZERO);
	print("%ldM memory: ", (m+k+1024*1024-1)/(1024*1024));
	print("%ldM kernel data, ", (m+k-pkb*1024+1024*1024-1)/(1024*1024));
	print("%ldM user, ", pkb/1024);
	print("%ldM swap\n", vkb/1024);
}
/*
 * Write handler for the segment device.
 * Qctl: parse a control message; "va <addr> <len>" attaches a new
 * shared segment covering the page-rounded range [addr, addr+len).
 * Qdata: copy n bytes into the global segment at offset voff via the
 * segment's command process (Cwrite).
 * Returns the number of bytes consumed; raises on error.
 */
static long
segmentwrite(Chan *c, void *a, long n, vlong voff)
{
	Cmdbuf *cb;
	Globalseg *g;
	ulong va, len, top;

	if(c->qid.type == QTDIR)
		error(Eperm);

	switch(TYPE(c)){
	case Qctl:
		g = c->aux;
		cb = parsecmd(a, n);
		/* BUG FIX: cb was leaked on both the error and success paths */
		if(waserror()){
			free(cb);
			nexterror();
		}
		/* BUG FIX: don't index cb->f[0] of an empty command */
		if(cb->nf < 1)
			error(Ebadctl);
		if(strcmp(cb->f[0], "va") == 0){
			if(g->s != nil)
				error("already has a virtual address");
			if(cb->nf < 3)
				error(Ebadarg);
			va = strtoul(cb->f[1], 0, 0);
			len = strtoul(cb->f[2], 0, 0);
			/* round [va, va+len) outward to whole pages */
			top = PGROUND(va + len);
			va = va&~(BY2PG-1);
			len = (top - va) / BY2PG;
			if(len == 0)
				error(Ebadarg);
			g->s = newseg(SG_SHARED, va, len);
		} else
			error(Ebadctl);
		poperror();
		free(cb);
		break;
	case Qdata:
		g = c->aux;
		if(voff + n > g->s->top - g->s->base)
			error(Ebadarg);
		qlock(&g->l);
		g->off = voff + g->s->base;
		g->data = smalloc(n);
		if(waserror()){
			free(g->data);
			qunlock(&g->l);
			nexterror();
		}
		g->dlen = n;
		memmove(g->data, a, g->dlen);
		/* hand the buffer to the segment's kproc to copy in */
		docmd(g, Cwrite);
		free(g->data);
		qunlock(&g->l);
		poperror();
		return g->dlen;
	default:
		panic("segmentwrite");
	}
	return 0;	/* not reached */
}
/*
 * Boot the kernel image found in flash.  If it carries a Q_MAGIC
 * a.out-style header the text and data are copied unsqueezed to their
 * run addresses; otherwise the image is assumed squeezed and expanded
 * with unsqueezef.  Finally jumps to the entry point.
 * Returns -1 only on failure (a successful boot never returns).
 */
int
flashboot(int)
{
	ulong entry, addr;
	void (*b)(void);
	Exec *ep;
	Block in;
	long n;
	uchar *p;

	if(flash.exec == 0)
		return -1;
	p = flash.exec;
	if(GLLONG(p) == Q_MAGIC){
		/* unsqueezed: copy data and perhaps text, then jump to it */
		ep = (Exec*)p;
		entry = PADDR(GLLONG(ep->entry));
		p += sizeof(Exec);
		addr = entry;
		n = GLLONG(ep->text);
		/* copy text only if it isn't already at its run address */
		if(addr != (ulong)p){
			memmove((void*)addr, p, n);
			print("text: %8.8lux <- %8.8lux [%ld]\n", addr, p, n);
		}
		p += n;
		if(entry >= FLASHMEM)
			addr = 3*BY2PG;	/* kernel text is in Flash, data in RAM */
		else
			addr = PGROUND(addr+n);
		n = GLLONG(ep->data);
		memmove((void*)addr, p, n);
		print("data: %8.8lux <- %8.8lux [%ld]\n", addr, p, n);
	}else{
		/* squeezed image: decompress in place, entry set by unsqueezef */
		in.data = p;
		in.rp = in.data;
		in.lim = p+BOOTLEN;
		in.wp = in.lim;
		n = unsqueezef(&in, &entry);
		if(n < 0)
			return -1;
	}
	print("entry=0x%lux\n", entry);
	uartwait();
	scc2stop();
	/*
	 * Go to new code. It's up to the program to get its PC relocated to
	 * the right place.
	 */
	b = (void (*)(void))KADDR(PADDR(entry));
	(*b)();
	return -1;
}
/*
 * Register an unbacked physical address range with the UPA map.
 * The start is rounded up to a page; if nothing page-aligned remains,
 * the range is ignored.  Boot time only, to populate the map.
 * Careful - physical addresses.
 */
void
mapupainit(uvlong addr, ulong size)
{
	uvlong aligned;
	ulong skipped;

	aligned = PGROUND(addr);
	skipped = aligned - addr;
	if(skipped < size)
		mapfree(&rmapupa, aligned, size - skipped);
}
/*
 * Register an unbacked physical address range with the UPA map.
 * The start is rounded up to a page; if nothing page-aligned remains,
 * the range is ignored.  Boot time only, to populate the map.
 * Careful - physical addresses.
 */
void
mapupainit(uint64_t addr, uint32_t size)
{
	uint64_t aligned;
	uint32_t skipped;

	aligned = PGROUND(addr);
	skipped = aligned - addr;
	if(skipped < size)
		mapfree(&rmapupa, aligned, size - skipped);
}
/*
 * syssegfree(va, len): release the physical pages wholly contained in
 * [va, va+len) within the segment holding va.  The start is rounded
 * up and the end rounded down to page boundaries, so partial pages at
 * either edge survive.  Returns 0; raises Ebadarg on a bad range.
 */
long
syssegfree(ulong *arg)
{
	Segment *s;
	ulong from, to;

	from = arg[0];
	to = from + arg[1];
	/* BUG FIX: reject ranges that wrap around the address space */
	if(to < from)
		error(Ebadarg);
	s = seg(up, from, 1);
	if(s == nil)
		error(Ebadarg);
	/* round the range inward to whole pages */
	to &= ~(BY2PG-1);
	from = PGROUND(from);
	/*
	 * BUG FIX: if rounding leaves an empty range, return before
	 * calling mfreeseg; (to - from)/BY2PG would otherwise underflow
	 * to a huge page count.
	 */
	if(from >= to) {
		qunlock(&s->lk);
		return 0;
	}
	if(to > s->top) {
		qunlock(&s->lk);
		error(Ebadarg);
	}
	mfreeseg(s, from, (to - from) / BY2PG);
	qunlock(&s->lk);
	/* the freed pages must disappear from the MMU */
	flushmmu();
	return 0;
}
/*
 * Grow or shrink segment index seg of the current process so it ends
 * at addr (rounded up to a page).  addr == 0 is a query returning the
 * segment base.  Raises Ebadarg for a missing segment, Enovmem when
 * memory or map space runs out, Esoverlap when the new top would land
 * inside another segment, and Einuse when shrinking a shared segment.
 */
long
ibrk(ulong addr, int seg)
{
	Segment *s, *ns;
	ulong newtop, newsize;
	int i, mapsize;
	Pte **map;

	s = up->seg[seg];
	if(s == 0)
		error(Ebadarg);

	if(addr == 0)
		return s->base;

	qlock(&s->lk);

	/* We may start with the bss overlapping the data */
	if(addr < s->base) {
		if(seg != BSEG || up->seg[DSEG] == 0 || addr < up->seg[DSEG]->base) {
			qunlock(&s->lk);
			error(Enovmem);
		}
		addr = s->base;
	}

	newtop = PGROUND(addr);
	newsize = (newtop-s->base)/BY2PG;
	if(newtop < s->top) {
		/*
		 * do not shrink a segment shared with other procs, as the
		 * to-be-freed address space may have been passed to the kernel
		 * already by another proc and is past the validaddr stage.
		 */
		if(s->ref > 1){
			qunlock(&s->lk);
			error(Einuse);
		}
		mfreeseg(s, newtop, (s->top-newtop)/BY2PG);
		s->top = newtop;
		s->size = newsize;
		qunlock(&s->lk);
		flushmmu();
		return 0;
	}

	/* growing: the new top must not land inside any other segment */
	for(i = 0; i < NSEG; i++) {
		ns = up->seg[i];
		if(ns == 0 || ns == s)
			continue;
		if(newtop >= ns->base && newtop < ns->top) {
			qunlock(&s->lk);
			error(Esoverlap);
		}
	}

	if(newsize > (SEGMAPSIZE*PTEPERTAB)) {
		qunlock(&s->lk);
		error(Enovmem);
	}

	/* enlarge the Pte map if the new size needs more entries */
	mapsize = ROUND(newsize, PTEPERTAB)/PTEPERTAB;
	if(mapsize > s->mapsize){
		map = smalloc(mapsize*sizeof(Pte*));
		memmove(map, s->map, s->mapsize*sizeof(Pte*));
		if(s->map != s->ssegmap)
			free(s->map);
		s->map = map;
		s->mapsize = mapsize;
	}

	s->top = newtop;
	s->size = newsize;
	qunlock(&s->lk);
	return 0;
}
/*
 * Memory configuration for the Sheevaplug port: size RAM (honoring
 * *maxmem and hardware probing), carve the kernel out of the
 * allocatable banks, then split the remaining pages between user
 * space (90%) and the kernel pools.
 */
void
confinit(void)
{
	int i;
	ulong kpages;
	uintptr pa;
	char *p;

	/*
	 * Copy the physical memory configuration to Conf.mem.
	 */
	if(nelem(sheevamem) > nelem(conf.mem)){
		iprint("memory configuration botch\n");
		exit(1);
	}
	if((p = getconf("*maxmem")) != nil) {
		memsize = strtoul(p, 0, 0) - PHYSDRAM;
		if (memsize < 16*MB)		/* sanity */
			memsize = 16*MB;
	}
	/*
	 * see if all that memory exists; if not, find out how much does.
	 * trapinit must have been called first.
	 */
	if (gotmem(memsize) < 0 && gotmem(256*MB) < 0 && gotmem(128*MB) < 0) {
		iprint("can't find any memory, assuming %dMB\n", Minmem / MB);
		memsize = Minmem;
	}
	/* keep the top 8KB out of the allocatable range */
	sheevamem[0].limit = PHYSDRAM + memsize - 8*1024;
	memmove(conf.mem, sheevamem, sizeof(sheevamem));

	conf.npage = 0;
	pa = PADDR(PGROUND(PTR2UINT(end)));

	/*
	 * we assume that the kernel is at the beginning of one of the
	 * contiguous chunks of memory and fits therein.
	 */
	for(i=0; i<nelem(conf.mem); i++){
		/* take kernel out of allocatable space */
		if(pa > conf.mem[i].base && pa < conf.mem[i].limit)
			conf.mem[i].base = pa;

		conf.mem[i].npage = (conf.mem[i].limit - conf.mem[i].base)/BY2PG;
		conf.npage += conf.mem[i].npage;
	}

	conf.upages = (conf.npage*90)/100;
	conf.ialloc = ((conf.npage-conf.upages)/2)*BY2PG;

	/* only one processor */
	conf.nmach = 1;

	/* set up other configuration parameters */
	conf.nproc = 100 + ((conf.npage*BY2PG)/MB)*5;
	if(cpuserver)
		conf.nproc *= 3;
	if(conf.nproc > 2000)
		conf.nproc = 2000;
	conf.nswap = conf.npage*3;
	conf.nswppo = 4096;
	conf.nimage = 200;

	conf.copymode = 0;		/* copy on write */

	/*
	 * Guess how much is taken by the large permanent
	 * datastructures. Mntcache and Mntrpc are not accounted for
	 * (probably ~300KB).
	 */
	kpages = conf.npage - conf.upages;
	kpages *= BY2PG;
	kpages -= conf.upages*sizeof(Page)
		+ conf.nproc*sizeof(Proc)
		+ conf.nimage*sizeof(Image)
		+ conf.nswap
		+ conf.nswppo*sizeof(Page*);
	mainmem->maxsize = kpages;
	if(!cpuserver)
		/*
		 * give terminals lots of image memory, too; the dynamic
		 * allocation will balance the load properly, hopefully.
		 * be careful with 32-bit overflow.
		 */
		imagmem->maxsize = kpages;
}
void confinit(void) { char *p; int userpcnt; ulong pa, kpages; /* passed in from ROM monitor: */ if(p = getconf("*kernelpercent")) userpcnt = 100 - strtol(p, 0, 0); else userpcnt = 0; pa = PGROUND(PADDR(end)); /* Blast Board specific */ conf.mem[0].npage = (MEM1SIZE - pa)/BY2PG; conf.mem[0].base = pa; conf.mem[1].npage = MEM2SIZE/BY2PG; conf.mem[1].base = MEM2BASE; conf.npage = conf.mem[0].npage + conf.mem[1].npage; conf.nmach = 1; conf.nproc = 100 + ((conf.npage*BY2PG)/MB)*5; if(cpuserver) conf.nproc *= 3; if(conf.nproc > 2000) conf.nproc = 2000; conf.nimage = 200; conf.nswap = conf.nproc*80; conf.nswppo = 4096; conf.copymode = 0; /* copy on write */ if(cpuserver) { if(userpcnt < 10) userpcnt = 70; kpages = conf.npage - (conf.npage*userpcnt)/100; /* * Hack for the big boys. Only good while physmem < 4GB. * Give the kernel a max. of 16MB + enough to allocate the * page pool. * This is an overestimate as conf.upages < conf.npages. * The patch of nimage is a band-aid, scanning the whole * page list in imagereclaim just takes too long. */ if(kpages > (16*MB + conf.npage*sizeof(Page))/BY2PG){ kpages = (16*MB + conf.npage*sizeof(Page))/BY2PG; conf.nimage = 2000; kpages += (conf.nproc*KSTACK)/BY2PG; } } else { if(userpcnt < 10) { if(conf.npage*BY2PG < 16*MB) userpcnt = 40; else userpcnt = 60; } kpages = conf.npage - (conf.npage*userpcnt)/100; /* * Make sure terminals with low memory get at least * 4MB on the first Image chunk allocation. */ if(conf.npage*BY2PG < 16*MB) imagmem->minarena = 4*1024*1024; } conf.upages = conf.npage - kpages; conf.ialloc = (kpages/2)*BY2PG; /* * Guess how much is taken by the large permanent * datastructures. Mntcache and Mntrpc are not accounted for * (probably ~300KB). 
*/ kpages *= BY2PG; kpages -= conf.upages*sizeof(Page) + conf.nproc*sizeof(Proc) + conf.nimage*sizeof(Image) + conf.nswap + conf.nswppo*sizeof(Page); mainmem->maxsize = kpages; if(!cpuserver){ /* * give terminals lots of image memory, too; the dynamic * allocation will balance the load properly, hopefully. * be careful with 32-bit overflow. */ imagmem->maxsize = kpages; } // conf.monitor = 1; /* BUG */ }
/*
 * Incremental kernel-image loader state machine.  Bytes arrive in
 * vbuf/nbuf and are streamed toward the current destination window
 * (b->wp .. b->ep); each time a window fills, the state advances:
 * collect the a.out header, then text, then data, or divert to the
 * ELF or gzip paths.  nbuf == 0 signals end of input, at which point
 * the loaded kernel is entered via warp9/warp64 (which do not return
 * on success).  Returns MORE, ENOUGH or FAIL.
 */
int
bootpass(Boot *b, void *vbuf, int nbuf)
{
	char *buf, *ebuf;
	Hdr *hdr;
	ulong magic, entry, data, text, bss;
	uvlong entry64;

	if(b->state == FAILED)
		return FAIL;

	if(nbuf == 0)
		goto Endofinput;

	buf = vbuf;
	ebuf = buf+nbuf;
	/* copy input into the current window; loop while a window fills */
	while(addbytes(&b->wp, b->ep, &buf, ebuf) == 0) {
		switch(b->state) {
		case INITKERNEL:
			/* first collect the a.out header */
			b->state = READEXEC;
			b->bp = (char*)&b->hdr;
			b->wp = b->bp;
			b->ep = b->bp+sizeof(Hdr);
			break;
		case READEXEC:
			hdr = &b->hdr;
			magic = GLLONG(hdr->magic);
			if(magic == I_MAGIC || magic == S_MAGIC) {
				/* plain a.out: stream text to its load address */
				b->state = READ9TEXT;
				b->bp = (char*)PADDR(GLLONG(hdr->entry));
				b->wp = b->bp;
				b->ep = b->wp+GLLONG(hdr->text);
				/*
				 * NOTE(review): for I_MAGIC the 64-bit words of the
				 * header are replayed into the text stream -
				 * presumably they belong to the text already consumed
				 * with the header; confirm against upstream.
				 */
				if(magic == I_MAGIC){
					memmove(b->bp, b->hdr.uvl, sizeof(b->hdr.uvl));
					b->wp += sizeof(b->hdr.uvl);
				}
				print("%lud", GLLONG(hdr->text));
				break;
			}

			/* check for gzipped kernel */
			if(b->bp[0] == 0x1F && (uchar)b->bp[1] == 0x8B && b->bp[2] == 0x08) {
				/* buffer the whole compressed image for later gunzip */
				b->state = READGZIP;
				b->bp = (char*)malloc(1440*1024);
				b->wp = b->bp;
				b->ep = b->wp + 1440*1024;
				memmove(b->bp, &b->hdr, sizeof(Hdr));
				b->wp += sizeof(Hdr);
				print("gz...");
				break;
			}

			/*
			 * Check for ELF.
			 */
			if(memcmp(b->bp, elfident, 4) == 0){
				b->state = READEHDR;
				b->bp = (char*)&ehdr;
				b->wp = b->bp;
				b->ep = b->wp + sizeof(Ehdr);
				memmove(b->bp, &b->hdr, sizeof(Hdr));
				b->wp += sizeof(Hdr);
				print("elf...");
				break;
			}
			print("bad kernel format (magic == %#lux)\n", magic);
			b->state = FAILED;
			return FAIL;
		case READ9TEXT:
			/* text done; data starts on the next page boundary */
			hdr = &b->hdr;
			b->state = READ9DATA;
			b->bp = (char*)PGROUND(PADDR(GLLONG(hdr->entry))+GLLONG(hdr->text));
			b->wp = b->bp;
			b->ep = b->wp + GLLONG(hdr->data);
			print("+%ld", GLLONG(hdr->data));
			break;
		case READ9DATA:
			/* data done; zero the bss and wait for end of input */
			hdr = &b->hdr;
			bss = GLLONG(hdr->bss);
			memset(b->ep, 0, bss);
			print("+%ld=%ld\n",
				bss, GLLONG(hdr->text)+GLLONG(hdr->data)+bss);
			b->state = TRYBOOT;
			return ENOUGH;
		case READEHDR:
			/* ELF header collected; readehdr sets up the next window */
			if(!readehdr(b)){
				print("readehdr failed\n");
				b->state = FAILED;
				return FAIL;
			}
			break;
		case READPHDR:
			if(!readphdr(b)){
				b->state = FAILED;
				return FAIL;
			}
			break;
		case READEPAD:
			if(!readepad(b)){
				b->state = FAILED;
				return FAIL;
			}
			break;
		case READEDATA:
			if(!readedata(b)){
				b->state = FAILED;
				return FAIL;
			}
			if(b->state == TRYBOOT)
				return ENOUGH;
			break;
		case TRYBOOT:
		case TRYEBOOT:
		case READGZIP:
			/* image complete; surplus input is ignored */
			return ENOUGH;
		case READ9LOAD:
		case INIT9LOAD:
			panic("9load");
		default:
			panic("bootstate");
		}
	}
	return MORE;

Endofinput:
	/* end of input */
	switch(b->state) {
	case INITKERNEL:
	case READEXEC:
	case READ9TEXT:
	case READ9DATA:
	case READEHDR:
	case READPHDR:
	case READEPAD:
	case READEDATA:
		print("premature EOF\n");
		b->state = FAILED;
		return FAIL;
	case TRYBOOT:
		/* enter a loaded a.out kernel */
		entry = GLLONG(b->hdr.entry);
		magic = GLLONG(b->hdr.magic);
		if(magic == I_MAGIC){
			print("entry: 0x%lux\n", entry);
			warp9(PADDR(entry));
		}
		else if(magic == S_MAGIC){
			entry64 = beswav(b->hdr.uvl[0]);
			warp64(entry64);
		}
		b->state = FAILED;
		return FAIL;
	case TRYEBOOT:
		/* enter a loaded ELF kernel */
		entry = GLLONG(b->hdr.entry);
		if(ehdr.machine == I386){
			print("entry: 0x%lux\n", entry);
			warp9(PADDR(entry));
		}
		else if(ehdr.machine == AMD64){
			print("entry: 0x%lux\n", entry);
			warp64(entry);
		}
		b->state = FAILED;
		return FAIL;
	case READGZIP:
		/* decompress the header first, then the whole image */
		hdr = &b->hdr;
		if(b->bp[0] != 0x1F || (uchar)b->bp[1] != 0x8B || b->bp[2] != 0x08)
			print("lost magic\n");

		print("%ld => ", b->wp - b->bp);
		if(gunzip((uchar*)hdr, sizeof(*hdr), (uchar*)b->bp, b->wp - b->bp) < sizeof(*hdr)) {
			print("badly compressed kernel\n");
			return FAIL;
		}

		entry = GLLONG(hdr->entry);
		text = GLLONG(hdr->text);
		data = GLLONG(hdr->data);
		bss = GLLONG(hdr->bss);
		print("%lud+%lud+%lud=%lud\n", text, data, bss, text+data+bss);

		if(gunzip((uchar*)PADDR(entry)-sizeof(Exec), sizeof(Exec)+text+data,
			(uchar*)b->bp, b->wp-b->bp) < sizeof(Exec)+text+data) {
			print("error uncompressing kernel\n");
			return FAIL;
		}

		/* relocate data to start at page boundary */
		memmove((void*)PGROUND(PADDR(entry+text)), (void*)(PADDR(entry+text)), data);

		entry = GLLONG(b->hdr.entry);
		magic = GLLONG(b->hdr.magic);
		if(magic == I_MAGIC){
			print("entry: 0x%lux\n", entry);
			warp9(PADDR(entry));
		}
		else if(magic == S_MAGIC){
			entry64 = beswav(b->hdr.uvl[0]);
			warp64(entry64);
		}
		b->state = FAILED;
		return FAIL;
	case INIT9LOAD:
	case READ9LOAD:
		panic("end 9load");
	default:
		panic("bootdone");
	}

	b->state = FAILED;
	return FAIL;
}
/*
 * Record a physical address range of the given type (RAM, UMB, UPA,
 * reserved) in the appropriate resource map and give it kernel MMU
 * mappings where appropriate.  Ranges straddling special boundaries
 * (MemMin, 16MB, the kernel image) are split by recursive calls until
 * a simple, uniform range remains.
 */
static void
map(ulong base, ulong len, int type)
{
	ulong e, n;
	ulong *table, flags, maxkpa;

	/*
	 * Split any call crossing MemMin to make below simpler.
	 */
	if(base < MemMin && len > MemMin-base){
		n = MemMin - base;
		map(base, n, type);
		map(MemMin, len-n, type);
	}

	/*
	 * Let lowraminit and umbscan hash out the low MemMin.
	 */
	if(base < MemMin)
		return;

	/*
	 * Any non-memory below 16*MB is used as upper mem blocks.
	 */
	if(type == MemUPA && base < 16*MB && base+len > 16*MB){
		map(base, 16*MB-base, MemUMB);
		map(16*MB, len-(16*MB-base), MemUPA);
		return;
	}

	/*
	 * Memory below CPU0END is reserved for the kernel
	 * and already mapped.
	 */
	if(base < PADDR(CPU0END)){
		n = PADDR(CPU0END) - base;
		if(len <= n)
			return;
		map(PADDR(CPU0END), len-n, type);
		return;
	}

	/*
	 * Memory between KTZERO and end is the kernel itself
	 * and is already mapped.
	 */
	if(base < PADDR(KTZERO) && base+len > PADDR(KTZERO)){
		map(base, PADDR(KTZERO)-base, type);
		return;
	}
	/*
	 * NOTE(review): n here is an absolute physical address (kernel
	 * end) yet it is compared with, and subtracted from, len - it
	 * looks as if it should be PADDR(PGROUND((ulong)end)) - base.
	 * Confirm against upstream before changing.
	 */
	if(PADDR(KTZERO) < base && base < PADDR(PGROUND((ulong)end))){
		n = PADDR(PGROUND((ulong)end));
		if(len <= n)
			return;
		map(PADDR(PGROUND((ulong)end)), len-n, type);
		return;
	}

	/*
	 * Now we have a simple case.
	 */
//	print("map %.8lux %.8lux %d\n", base, base+len, type);
	switch(type){
	case MemRAM:
		mapfree(&rmapram, base, len);
		flags = PTEWRITE|PTEVALID;
		break;
	case MemUMB:
		mapfree(&rmapumb, base, len);
		flags = PTEWRITE|PTEUNCACHED|PTEVALID;
		break;
	case MemUPA:
		mapfree(&rmapupa, base, len);
		flags = 0;
		break;
	default:
	case MemReserved:
		flags = 0;
		break;
	}

	/*
	 * bottom MemMin is already mapped - just twiddle flags.
	 * (not currently used - see above)
	 */
	if(base < MemMin){
		table = KADDR(PPN(m->pdb[PDX(base)]));
		e = base+len;
		base = PPN(base);
		for(; base<e; base+=BY2PG)
			table[PTX(base)] |= flags;
		return;
	}

	/*
	 * Only map from KZERO to 2^32.
	 */
	if(flags){
		maxkpa = -KZERO;
		if(base >= maxkpa)
			return;
		if(len > maxkpa-base)
			len = maxkpa - base;
		pdbmap(m->pdb, base|flags, base+KZERO, len);
	}
}
/* * we assume that the kernel is at the beginning of one of the * contiguous chunks of memory. */ void confinit(void) { int i, j; ulong addr; ulong ktop; /* find first two contiguous sections of available memory */ addr = PHYSDRAM0; for(i=0; i<nelem(conf.mem); i++){ conf.mem[i].base = addr; conf.mem[i].limit = addr; } for(j=0; j<nelem(conf.mem); j++){ conf.mem[j].base = addr; conf.mem[j].limit = addr; for(i = 0; i < 512; i++){ if(probemem(addr) == 0) break; addr += OneMeg; } for(; i < 512; i++){ if(probemem(addr) < 0) break; addr += OneMeg; conf.mem[j].limit = addr; } } conf.npage = 0; for(i=0; i<nelem(conf.mem); i++){ /* take kernel out of allocatable space */ ktop = PGROUND((ulong)end); if(ktop >= conf.mem[i].base && ktop <= conf.mem[i].limit) conf.mem[i].base = ktop; /* zero memory */ memset((void*)conf.mem[i].base, 0, conf.mem[i].limit - conf.mem[i].base); conf.mem[i].npage = (conf.mem[i].limit - conf.mem[i].base)/BY2PG; conf.npage += conf.mem[i].npage; } if(conf.npage > 16*MB/BY2PG){ conf.upages = (conf.npage*60)/100; imagmem->minarena = 4*1024*1024; }else conf.upages = (conf.npage*40)/100; conf.ialloc = ((conf.npage-conf.upages)/2)*BY2PG; /* only one processor */ conf.nmach = 1; /* set up other configuration parameters */ conf.nproc = 100; conf.nswap = conf.npage*3; conf.nswppo = 4096; conf.nimage = 200; conf.monitor = 1; conf.copymode = 0; /* copy on write */ }
/*
 * Memory configuration for an ARM port using getramsize: honor the
 * "service" and *kernelpercent/*maxmem boot parameters, carve the
 * kernel out of the allocatable range, split pages between user and
 * kernel, and size the big permanent data structures.
 */
void
confinit(void)
{
	int i, userpcnt;
	ulong kpages;
	uintptr pa;
	char *p;

	if(p = getconf("service")){
		if(strcmp(p, "cpu") == 0)
			cpuserver = 1;
		else if(strcmp(p,"terminal") == 0)
			cpuserver = 0;
	}

	if(p = getconf("*kernelpercent"))
		userpcnt = 100 - strtol(p, 0, 0);
	else
		userpcnt = 0;

	if((p = getconf("*maxmem")) != nil){
		memsize = strtoul(p, 0, 0) - PHYSDRAM;
		if (memsize < 16*MB)		/* sanity */
			memsize = 16*MB;
	}

	getramsize(&conf.mem[0]);
	if(conf.mem[0].limit == 0){
		/* no hardware answer: fall back to memsize */
		conf.mem[0].base = PHYSDRAM;
		conf.mem[0].limit = PHYSDRAM + memsize;
	}else if(p != nil)
		/* *maxmem overrides the hardware-reported size */
		conf.mem[0].limit = conf.mem[0].base + memsize;

	conf.npage = 0;
	pa = PADDR(PGROUND(PTR2UINT(end)));

	/*
	 *  we assume that the kernel is at the beginning of one of the
	 *  contiguous chunks of memory and fits therein.
	 */
	for(i=0; i<nelem(conf.mem); i++){
		/* take kernel out of allocatable space */
		if(pa > conf.mem[i].base && pa < conf.mem[i].limit)
			conf.mem[i].base = pa;

		conf.mem[i].npage = (conf.mem[i].limit - conf.mem[i].base)/BY2PG;
		conf.npage += conf.mem[i].npage;
	}

	if(userpcnt < 10)
		userpcnt = 60 + cpuserver*10;
	kpages = conf.npage - (conf.npage*userpcnt)/100;

	/*
	 * can't go past the end of virtual memory
	 * (ulong)-KZERO is 2^32 - KZERO
	 */
	if(kpages > ((ulong)-KZERO)/BY2PG)
		kpages = ((ulong)-KZERO)/BY2PG;

	conf.upages = conf.npage - kpages;
	conf.ialloc = (kpages/2)*BY2PG;

	/* only one processor */
	conf.nmach = 1;

	/* set up other configuration parameters */
	conf.nproc = 100 + ((conf.npage*BY2PG)/MB)*5;
	if(cpuserver)
		conf.nproc *= 3;
	if(conf.nproc > 2000)
		conf.nproc = 2000;
	conf.nswap = conf.npage*3;
	conf.nswppo = 4096;
	conf.nimage = 200;

	conf.copymode = 0;		/* copy on write */

	/*
	 * Guess how much is taken by the large permanent
	 * datastructures. Mntcache and Mntrpc are not accounted for.
	 */
	kpages = conf.npage - conf.upages;
	kpages *= BY2PG;
	kpages -= conf.upages*sizeof(Page)
		+ conf.nproc*sizeof(Proc)
		+ conf.nimage*sizeof(Image)
		+ conf.nswap
		+ conf.nswppo*sizeof(Page*);
	mainmem->maxsize = kpages;
	if(!cpuserver)
		/*
		 * give terminals lots of image memory, too; the dynamic
		 * allocation will balance the load properly, hopefully.
		 * be careful with 32-bit overflow.
		 */
		imagmem->maxsize = kpages;
}
/*
 * Grow or shrink segment index seg of the current process so it ends
 * at addr (rounded up to a page).  addr == 0 is a query returning the
 * segment base.  Raises Ebadarg for a missing segment, Enovmem /
 * Enoswap when memory runs out, and Esoverlap when the new top would
 * land inside another segment.
 * NOTE(review): unlike some sibling kernels, the shrink path does not
 * refuse segments shared with other procs (s->ref > 1) - confirm that
 * is acceptable here.
 */
long
ibrk(ulong addr, int seg)
{
	Segment *s, *ns;
	ulong newtop, newsize;
	int i, mapsize;
	Pte **map;

	s = up->seg[seg];
	if(s == 0)
		error(Ebadarg);

	if(addr == 0)
		return s->base;

	qlock(&s->lk);

	/* We may start with the bss overlapping the data */
	if(addr < s->base) {
		if(seg != BSEG || up->seg[DSEG] == 0 || addr < up->seg[DSEG]->base) {
			qunlock(&s->lk);
			error(Enovmem);
		}
		addr = s->base;
	}

	newtop = PGROUND(addr);
	newsize = (newtop-s->base)/BY2PG;
	if(newtop < s->top) {
		/* shrinking: free the pages above the new top */
		mfreeseg(s, newtop, (s->top-newtop)/BY2PG);
		s->top = newtop;
		s->size = newsize;
		qunlock(&s->lk);
		flushmmu();
		return 0;
	}

	/* growing requires backing store headroom */
	if(swapfull()){
		qunlock(&s->lk);
		error(Enoswap);
	}

	/* the new top must not land inside any other segment */
	for(i = 0; i < NSEG; i++) {
		ns = up->seg[i];
		if(ns == 0 || ns == s)
			continue;
		if(newtop >= ns->base && newtop < ns->top) {
			qunlock(&s->lk);
			error(Esoverlap);
		}
	}

	if(newsize > (SEGMAPSIZE*PTEPERTAB)) {
		qunlock(&s->lk);
		error(Enovmem);
	}

	/* enlarge the Pte map if the new size needs more entries */
	mapsize = ROUND(newsize, PTEPERTAB)/PTEPERTAB;
	if(mapsize > s->mapsize){
		map = smalloc(mapsize*sizeof(Pte*));
		memmove(map, s->map, s->mapsize*sizeof(Pte*));
		if(s->map != s->ssegmap)
			free(s->map);
		s->map = map;
		s->mapsize = mapsize;
	}

	s->top = newtop;
	s->size = newsize;
	qunlock(&s->lk);
	return 0;
}
/*
 * exec(2): replace the current process image with the program in
 * file0, passing argp0.  Handles #! interpreter scripts, builds the
 * new stack (temporarily at kernel addresses), attaches text/data/bss
 * segments from the cached image, and finally enters the new program
 * via execregs.  Any error before the "Committed" point leaves the
 * old image intact; after it, a failure kills the process.
 */
uintptr
sysexec(va_list list)
{
	Segment *s, *ts;
	int i;
	Chan *tc;
	char **argv, **argp, **argp0;
	char *a, *e, *charp, *args, *file, *file0;
	char *progarg[sizeof(Exec)/2+1], *elem, progelem[64];
	ulong magic, ssize, nargs, nbytes, n;
	uintptr t, d, b, entry, bssend, text, data, bss, tstk, align;
	int indir;
	Exec exec;
	char line[sizeof(Exec)];
	Fgrp *f;
	Image *img;
	Tos *tos;

	args = elem = nil;
	/* fetch and validate the user's arguments */
	file0 = va_arg(list, char*);
	validaddr((uintptr)file0, 1, 0);
	argp0 = va_arg(list, char**);
	evenaddr((uintptr)argp0);
	validaddr((uintptr)argp0, 2*BY2WD, 0);
	if(*argp0 == nil)
		error(Ebadarg);
	file0 = validnamedup(file0, 1);
	if(waserror()){
		free(file0);
		free(elem);
		free(args);
		/* Disaster after commit */
		if(up->seg[SSEG] == nil)
			pexit(up->errstr, 1);
		s = up->seg[ESEG];
		if(s != nil){
			putseg(s);
			up->seg[ESEG] = nil;
		}
		nexterror();
	}
	align = BY2PG;
	indir = 0;
	file = file0;
	/* loop to resolve #! interpreter indirection */
	for(;;){
		tc = namec(file, Aopen, OEXEC, 0);
		if(waserror()){
			cclose(tc);
			nexterror();
		}
		if(!indir)
			kstrdup(&elem, up->genbuf);

		n = devtab[tc->type]->read(tc, &exec, sizeof(Exec), 0);
		if(n <= 2)
			error(Ebadexec);
		magic = l2be(exec.magic);
		if(n == sizeof(Exec) && (magic == AOUT_MAGIC)){
			entry = l2be(exec.entry);
			text = l2be(exec.text);
			if(magic & HDR_MAGIC)
				text += 8;
			switch(magic){
			case S_MAGIC:	/* 2MB segment alignment for amd64 */
				align = 0x200000;
				break;
			case V_MAGIC:	/* 16K segment alignment for mips */
				align = 0x4000;
				break;
			}
			/* sanity: text fits and entry lies within it */
			if(text >= (USTKTOP-USTKSIZE)-(UTZERO+sizeof(Exec))
			|| entry < UTZERO+sizeof(Exec)
			|| entry >= UTZERO+sizeof(Exec)+text)
				error(Ebadexec);
			break; /* for binary */
		}

		/*
		 * Process #! /bin/sh args ...
		 */
		memmove(line, &exec, n);
		if(indir || line[0]!='#' || line[1]!='!')
			error(Ebadexec);
		n = shargs(line, n, progarg);
		if(n < 1)
			error(Ebadexec);
		indir = 1;
		/*
		 * First arg becomes complete file name
		 */
		progarg[n++] = file;
		progarg[n] = nil;
		argp0++;
		file = progarg[0];
		if(strlen(elem) >= sizeof progelem)
			error(Ebadexec);
		strcpy(progelem, elem);
		progarg[0] = progelem;
		poperror();
		cclose(tc);
	}

	/* lay out text, data and bss with the chosen alignments */
	data = l2be(exec.data);
	bss = l2be(exec.bss);
	align--;
	t = (UTZERO+sizeof(Exec)+text+align) & ~align;
	align = BY2PG-1;
	d = (t + data + align) & ~align;
	bssend = t + data + bss;
	b = (bssend + align) & ~align;
	if(t >= (USTKTOP-USTKSIZE) || d >= (USTKTOP-USTKSIZE) || b >= (USTKTOP-USTKSIZE))
		error(Ebadexec);

	/*
	 * Args: pass 1: count
	 */
	nbytes = sizeof(Tos);		/* hole for profiling clock at top of stack (and more) */
	nargs = 0;
	if(indir){
		argp = progarg;
		while(*argp != nil){
			a = *argp++;
			nbytes += strlen(a) + 1;
			nargs++;
		}
	}
	argp = argp0;
	while(*argp != nil){
		a = *argp++;
		/* re-validate the argv vector at each page crossing */
		if(((uintptr)argp&(BY2PG-1)) < BY2WD)
			validaddr((uintptr)argp, BY2WD, 0);
		validaddr((uintptr)a, 1, 0);
		e = vmemchr(a, 0, USTKSIZE);
		if(e == nil)
			error(Ebadarg);
		nbytes += (e - a) + 1;
		if(nbytes >= USTKSIZE)
			error(Enovmem);
		nargs++;
	}
	ssize = BY2WD*(nargs+1) + ((nbytes+(BY2WD-1)) & ~(BY2WD-1));

	/*
	 * 8-byte align SP for those (e.g. sparc) that need it.
	 * execregs() will subtract another 4 bytes for argc.
	 */
	if(BY2WD == 4 && (ssize+4) & 7)
		ssize += 4;

	if(PGROUND(ssize) >= USTKSIZE)
		error(Enovmem);

	/*
	 * Build the stack segment, putting it in kernel virtual for the moment
	 */
	qlock(&up->seglock);
	if(waserror()){
		qunlock(&up->seglock);
		nexterror();
	}

	/* find a scratch address range not overlapping any segment */
	s = up->seg[SSEG];
	do {
		tstk = s->base;
		if(tstk <= USTKSIZE)
			error(Enovmem);
	} while((s = isoverlap(up, tstk-USTKSIZE, USTKSIZE)) != nil);
	up->seg[ESEG] = newseg(SG_STACK, tstk-USTKSIZE, USTKSIZE/BY2PG);

	/*
	 * Args: pass 2: assemble; the pages will be faulted in
	 */
	tos = (Tos*)(tstk - sizeof(Tos));
	tos->cyclefreq = m->cyclefreq;
	tos->kcycles = 0;
	tos->pcycles = 0;
	tos->clock = 0;
	argv = (char**)(tstk - ssize);
	charp = (char*)(tstk - nbytes);
	if(indir)
		argp = progarg;
	else
		argp = argp0;

	for(i=0; i<nargs; i++){
		/* script args first, then switch to the user's argv */
		if(indir && *argp==nil) {
			indir = 0;
			argp = argp0;
		}
		/* store the arg's eventual user-space address */
		*argv++ = charp + (USTKTOP-tstk);
		a = *argp++;
		if(indir)
			e = strchr(a, 0);
		else {
			validaddr((uintptr)a, 1, 0);
			e = vmemchr(a, 0, (char*)tstk - charp);
			if(e == nil)
				error(Ebadarg);
		}
		n = (e - a) + 1;
		memmove(charp, a, n);
		charp += n;
	}

	/* copy args; easiest from new process's stack */
	a = (char*)(tstk - nbytes);
	n = charp - a;
	if(n > 128)	/* don't waste too much space on huge arg lists */
		n = 128;
	args = smalloc(n);
	memmove(args, a, n);
	if(n>0 && args[n-1]!='\0'){
		/* make sure last arg is NUL-terminated */
		/* put NUL at UTF-8 character boundary */
		for(i=n-1; i>0; --i)
			if(fullrune(args+i, n-i))
				break;
		args[i] = 0;
		n = i+1;
	}

	/*
	 * Committed.
	 * Free old memory.
	 * Special segments are maintained across exec
	 */
	for(i = SSEG; i <= BSEG; i++) {
		putseg(up->seg[i]);
		/* prevent a second free if we have an error */
		up->seg[i] = nil;
	}
	for(i = ESEG+1; i < NSEG; i++) {
		s = up->seg[i];
		if(s != nil && (s->type&SG_CEXEC) != 0) {
			putseg(s);
			up->seg[i] = nil;
		}
	}

	/*
	 * Close on exec
	 */
	if((f = up->fgrp) != nil) {
		for(i=0; i<=f->maxfd; i++)
			fdclose(i, CCEXEC);
	}

	/* Text. Shared. Attaches to cache image if possible */
	/* attachimage returns a locked cache image */
	img = attachimage(SG_TEXT|SG_RONLY, tc, UTZERO, (t-UTZERO)>>PGSHIFT);
	ts = img->s;
	up->seg[TSEG] = ts;
	ts->flushme = 1;
	ts->fstart = 0;
	ts->flen = sizeof(Exec)+text;
	unlock(img);

	/* Data. Shared. */
	s = newseg(SG_DATA, t, (d-t)>>PGSHIFT);
	up->seg[DSEG] = s;

	/* Attached by hand */
	incref(img);
	s->image = img;
	s->fstart = ts->fstart+ts->flen;
	s->flen = data;

	/* BSS. Zero fill on demand */
	up->seg[BSEG] = newseg(SG_BSS, d, (b-d)>>PGSHIFT);

	/*
	 * Move the stack
	 */
	s = up->seg[ESEG];
	up->seg[ESEG] = nil;
	s->base = USTKTOP-USTKSIZE;
	s->top = USTKTOP;
	relocateseg(s, USTKTOP-tstk);
	up->seg[SSEG] = s;
	qunlock(&up->seglock);
	poperror();	/* seglock */

	/*
	 *  '/' processes are higher priority (hack to make /ip more responsive).
	 */
	if(devtab[tc->type]->dc == L'/')
		up->basepri = PriRoot;
	up->priority = up->basepri;
	poperror();	/* tc */
	cclose(tc);
	poperror();	/* file0 */
	free(file0);

	/* install the new name and argument summary */
	qlock(&up->debug);
	free(up->text);
	up->text = elem;
	free(up->args);
	up->args = args;
	up->nargs = n;
	up->setargs = 0;
	up->nnote = 0;
	up->notify = 0;
	up->notified = 0;
	up->privatemem = 0;
	up->noswap = 0;
	procsetup(up);
	qunlock(&up->debug);

	/*
	 *  At this point, the mmu contains info about the old address
	 *  space and needs to be flushed
	 */
	flushmmu();

	if(up->hang)
		up->procctl = Proc_stopme;
	return execregs(entry, ssize, nargs);
}
/*
 * Memory configuration for a port whose console firmware reports
 * memory in banks: find the bank holding the kernel, split memory
 * into a low 8MB bank (for 24-bit DMA devices) and the rest, carve
 * out the kernel, and split the pages between user and kernel.
 */
void
confinit(void)
{
	ulong ktop, kpages;
	Bank *b, *eb;
	extern void _main(void);
	int userpcnt;
	char *p;

	if(p = getconf("*kernelpercent"))
		userpcnt = 100 - strtol(p, 0, 0);
	else
		userpcnt = 0;

	/*
	 * The console firmware divides memory into 1 or more banks.
	 * FInd the bank with the kernel in it.
	 */
	b = bootconf->bank;
	eb = b+bootconf->nbank;
	ktop = PGROUND((ulong)end);
	ktop = PADDR(ktop);
	while(b < eb) {
		if(b->min < ktop && ktop < b->max)
			break;
		b++;
	}
	if(b == eb)
		panic("confinit");

	/*
	 * Split the bank of memory into 2 banks to fool the allocator into
	 * allocating low memory pages from bank 0 for any peripherals
	 * which only have a 24bit address counter.
	 */
	conf.mem[0].npage = (8*1024*1024)/BY2PG;
	conf.mem[0].base = 0;

	conf.mem[1].npage = (b->max-8*1024*1024)/BY2PG;
	conf.mem[1].base = 8*1024*1024;

	conf.npage = conf.mem[0].npage+conf.mem[1].npage;
	/*
	 * NOTE(review): upages and ialloc set here are recomputed below
	 * from kpages; these early values only matter if something reads
	 * them in between - confirm, else they are dead stores.
	 */
	conf.upages = (conf.npage*70)/100;

	/* the kernel occupies the start of bank 0 */
	conf.mem[0].npage -= ktop/BY2PG;
	conf.mem[0].base += ktop;

	conf.ialloc = ((conf.npage-conf.upages)/2)*BY2PG;

	/*
	 * Fix up the bank we found to be the remnant, below the kernel.
	 * This, and the other banks, will be passed to xhole() later.
	 * BUG: conf.upages needs to be adjusted, but how?  In practice,
	 * we only have 1 bank, and the remnant is small.
	 */
	b->max = (uvlong)_main & ~(BY2PG-1);

	conf.nmach = 1;
	conf.nproc = 100 + ((conf.npage*BY2PG)/MB)*5;
	if(cpuserver)
		conf.nproc *= 3;
	if(conf.nproc > 2000)
		conf.nproc = 2000;
	conf.nimage = 200;
	conf.nswap = conf.nproc*80;
	conf.nswppo = 4096;
	conf.copymode = 0;			/* copy on write */

	if(cpuserver) {
		if(userpcnt < 10)
			userpcnt = 70;
		kpages = conf.npage - (conf.npage*userpcnt)/100;

		/*
		 * Hack for the big boys. Only good while physmem < 4GB.
		 * Give the kernel a max. of 16MB + enough to allocate the
		 * page pool.
		 * This is an overestimate as conf.upages < conf.npages.
		 * The patch of nimage is a band-aid, scanning the whole
		 * page list in imagereclaim just takes too long.
		 */
		if(kpages > (16*MB + conf.npage*sizeof(Page))/BY2PG){
			kpages = (16*MB + conf.npage*sizeof(Page))/BY2PG;
			conf.nimage = 2000;
			kpages += (conf.nproc*KSTACK)/BY2PG;
		}
	} else {
		if(userpcnt < 10) {
			if(conf.npage*BY2PG < 16*MB)
				userpcnt = 40;
			else
				userpcnt = 60;
		}
		kpages = conf.npage - (conf.npage*userpcnt)/100;

		/*
		 * Make sure terminals with low memory get at least
		 * 4MB on the first Image chunk allocation.
		 */
		if(conf.npage*BY2PG < 16*MB)
			imagmem->minarena = 4*1024*1024;
	}
	conf.upages = conf.npage - kpages;
	conf.ialloc = (kpages/2)*BY2PG;

	/*
	 * Guess how much is taken by the large permanent
	 * datastructures. Mntcache and Mntrpc are not accounted for.
	 */
	kpages *= BY2PG;
	kpages -= conf.upages*sizeof(Page)
		+ conf.nproc*sizeof(Proc)
		+ conf.nimage*sizeof(Image)
		+ conf.nswap
		+ conf.nswppo*sizeof(Page*);
	mainmem->maxsize = kpages;
	if(!cpuserver){
		/*
		 * give terminals lots of image memory, too; the dynamic
		 * allocation will balance the load properly, hopefully.
		 * be careful with 32-bit overflow.
		 */
		imagmem->maxsize = kpages;
	}

//	conf.monitor = 1;	/* BUG */
}
/*
 * Memory configuration for the sparc port: identify the machine from
 * the ID prom, probe its memory banks, split memory so the ethernet's
 * 24-bit DMA can use bank 0, carve out the kernel, and derive the
 * configuration parameters.
 */
void
confinit(void)
{
	ulong i;
	ulong ktop;

	conf.monitor = 0;
	conf.nmach = 1;
	if(conf.nmach > MAXMACH)
		panic("confinit");

	/* fetch ID prom */
	physcopyin(&idprom, NVR_PHYS+IDOFF, sizeof(idprom));
	if(idprom.format!=1 || (idprom.type&0xF0)!=0x80)
		/* deliberate fault: not a new generation sparc; die! */
		*(ulong*)~0 = 0;

	/* look up the machine's parameters by prom type */
	for(sparam = sysparam; sparam->id; sparam++)
		if(sparam->id == idprom.type)
			break;
	/* First entry in the table is the default */
	if(sparam->id == 0)
		sparam = sysparam;

	conf.ss2 = sparam->ss2;
	conf.vacsize = sparam->vacsize;
	conf.vaclinesize = sparam->vacline;
	conf.ncontext = sparam->ncontext;
	conf.ss2cachebug = sparam->cachebug;

	/* probe each possible memory bank */
	for(i=0; i<sparam->nbank; i++)
		if(probemem(i*sparam->banksize*MB))
			scanbank(i*sparam->banksize*MB, mempres, sparam->banksize);

	bank[0] = conf.npage0*BY2PG/MB;
	bank[1] = conf.npage1*BY2PG/MB;

	if(bank[1] == 0){
		/*
		 * This split of memory into 2 banks fools the allocator into
		 * allocating low memory pages from bank 0 for the ethernet
		 * since it has only a 24bit address counter.
		 * NB. Suns must have at LEAST 8Mbytes.
		 */
		conf.npage1 = conf.npage0 - (8*MB)/BY2PG;
		conf.base1 = conf.base0 + 8*MB;
		conf.npage0 = (8*MB)/BY2PG;
		bank[1] = bank[0]-8;
		bank[0] = 8;
	}

	conf.npage = conf.npage0+conf.npage1;

	/* the kernel occupies the start of bank 0 */
	ktop = PGROUND((ulong)end);
	ktop = PADDR(ktop);
	conf.npage0 -= ktop/BY2PG;
	conf.base0 += ktop;
	conf.nproc = 100 + ((conf.npage*BY2PG)/MB)*5;
	conf.copymode = 0;		/* copy on write */
	conf.arp = 32;
	/* half of the kernel's percentage share of pages goes to ialloc */
	conf.ialloc = (((conf.npage*(100-sparam->pcnt))/100)/2)*BY2PG;
	eve = strdup("inferno");

#ifdef notdef
	/* XXX - Eric - Autoconfigure memory */
	/* XXX - Tad: 8 eigths, total... */
	mainmem->maxsize = (conf.npage*BY2PG)/8;
	heapmem->maxsize = ((conf.npage*BY2PG)*5)/8;
	imagmem->maxsize = ((conf.npage*BY2PG)*2)/8;
#endif
}