/*
 * System call: zfree(Zio io[], int nio).
 * Returns the zero-copy buffers described by io[] to their owning
 * segments and scrubs each entry so user code cannot replay the free.
 */
void
sysziofree(Ar0 *ar0, ...)
{
	Mach *m = machp();
	Zio *io;
	int nio, i;
	Segment *s;
	va_list list;
	va_start(list, ar0);

	/*
	 * zfree(Zio io[], int nio);
	 */
	io = va_arg(list, Zio*);
	nio = va_arg(list, int);
	va_end(list);

	/* check the whole user array is addressable (3rd arg 1: presumably write access — confirm against validaddr) */
	io = validaddr(io, sizeof io[0] * nio, 1);
	for(i = 0; i < nio; i++){
		/* last arg 1 appears to return the segment locked: every path below qunlocks */
		s = seg(m->externup, PTR2UINT(io[i].data), 1);
		if(s == nil)
			error("invalid address in zio");
		if((s->type&SG_ZIO) == 0){
			qunlock(&s->lk);
			error("segment is not a zero-copy segment");
		}
		zputaddr(s, PTR2UINT(io[i].data));
		qunlock(&s->lk);
		/* clear the entry so a second zfree on it is harmless */
		io[i].data = nil;
		io[i].size = 0;
	}
}
/*
 * System call: semalt(int *sl[], int ns).
 * Builds kernel semaphores for up to nelem(ksl) user semaphore words
 * and waits on all of them; the chosen index (from semalt()) is
 * returned in ar0->i, -1 on error.
 */
void
syssemalt(Ar0 *ar0, ...)
{
	Proc *up = externup();
	int **sl;
	int i, *np, ns;
	Segment *sg;
	Sem *ksl[16];
	va_list list;
	va_start(list, ar0);

	/*
	 * void semalt(int*[], int);
	 */
	ar0->i = -1;
	sl = va_arg(list, int**);
	ns = va_arg(list, int);

	/*
	 * ns comes straight from user space: reject bad counts with
	 * error() rather than panicking the kernel, and do so BEFORE
	 * ns is used to size the validaddr() check (a negative ns
	 * would otherwise wrap the size computation).
	 */
	if(ns < 0 || ns > nelem(ksl))
		error(Ebadarg);
	sl = validaddr(sl, ns * sizeof(int*), 1);
	for(i = 0; i < ns; i++){
		np = sl[i];
		np = validaddr(np, sizeof(int), 1);
		evenaddr(PTR2UINT(np));	/* semaphore word must be aligned */
		if((sg = seg(up, PTR2UINT(np), 0)) == nil)
			error(Ebadarg);
		ksl[i] = segmksem(sg, np);
	}
	ar0->i = semalt(ksl, ns);
	va_end(list);
}
/*
 * Finish setting up a child created by rfork: build its kernel
 * scheduling context so it resumes in sysrforkret with a copy of the
 * parent's user registers.
 */
void
sysrforkchild(Proc* child, Proc* parent)
{
	Ureg *cureg;

/*
 * Number of stack slots reserved above the Ureg.
 * NOTE(review): the original comments were contradictory ("if STACKPAD
 * is 1 things go very bad" vs "it is the right value"); the code uses 1
 * — one slot for the return PC, none for trap's arguments.
 */
#define STACKPAD 1 /* for return PC? */
	/*
	 * Add STACKPAD*BY2SE to the stack to account for
	 *  - the return PC
	 *  (NOT NOW) - trap's arguments (syscallnr, ureg)
	 */
	child->sched.sp = PTR2UINT(child->kstack+KSTACK-((sizeof(Ureg)+STACKPAD*BY2SE)));
	child->sched.pc = PTR2UINT(sysrforkret);

	/* the Ureg sits just above the reserved pad on the child's stack */
	cureg = (Ureg*)(child->sched.sp+STACKPAD*BY2SE);
	memmove(cureg, parent->dbgreg, sizeof(Ureg));

	/* Things from bottom of syscall which were never executed */
	child->psstate = 0;
	child->insyscall = 0;

	//iprint("Child SP set tp %p\n", (void *)child->sched.sp);

	fpusysrforkchild(child, parent);
}
static void _kproftimer(uintptr_t pc) { if(kprof.time == 0) return; /* * if the pc corresponds to the idle loop, don't consider it. if(m->inidle) return; */ /* * if the pc is coming out of spllo or splx, * use the pc saved when we went splhi. */ if(pc>=PTR2UINT(spllo) && pc<=PTR2UINT(spldone)) pc = machp()->splpc; ilock(&kprof.l); kprof.buf[0] += TK2MS(1); if(kprof.minpc<=pc && pc<kprof.maxpc){ pc -= kprof.minpc; pc >>= LRES; kprof.buf[pc] += TK2MS(1); }else
/*
 * Initialize the scheduling context of a new kernel process.
 * Architecture dependent because of the starting stack location: on
 * this port the stack pointer starts at the very top of the kernel
 * stack and execution enters through linkproc(), which invokes the
 * saved function with the saved argument.
 */
void
kprocchild(Proc *p, void (*func)(void*), void *arg)
{
	/* record what linkproc() should run */
	p->kpfun = func;
	p->kparg = arg;

	/* fresh stack at the top of the kernel stack; resume in linkproc */
	p->sched.sp = PTR2UINT(p->kstack + KSTACK);
	p->sched.pc = PTR2UINT(linkproc);
}
/*
 * Set up stack and initial PC for a new kernel process.
 * gotolabel() pops one stack slot (BY2SE) for the return PC it uses to
 * jump to linkproc(), so reserve that word below the top of the kernel
 * stack and keep the result aligned per STACKALIGN.
 */
void
kprocchild(Proc* p, void (*func)(void*), void* arg)
{
	p->kpfun = func;
	p->kparg = arg;
	p->sched.pc = PTR2UINT(linkproc);
	/* one word reserved for the return PC, then aligned */
	p->sched.sp = STACKALIGN(PTR2UINT(p->kstack + KSTACK - BY2SE));
}
/*
 * Initialize the xalloc arena: chain the hole descriptors into a free
 * list, then walk the configured memory banks giving the kernel its
 * share (up to conf.npage - conf.upages pages, below the kernel-
 * addressable limit) and handing whatever is left to the user page
 * allocator.
 */
void
xinit(void)
{
	int i, n, upages, kpages;
	ulong maxkpa;
	Confmem *m;
	Pallocmem *pm;
	Hole *h, *eh;

	/* link all hole descriptors into xlists.flist */
	eh = &xlists.hole[Nhole-1];
	for(h = xlists.hole; h < eh; h++)
		h->link = h+1;
	xlists.flist = xlists.hole;

	upages = conf.upages;
	kpages = conf.npage - upages;
	pm = palloc.mem;
	maxkpa = -KZERO;	/* highest physical address the kernel can map */
	for(i=0; i<nelem(conf.mem); i++){
		m = &conf.mem[i];
		n = m->npage;
		if(n > kpages)
			n = kpages;	/* don't take more than the kernel's quota */
		if(m->base >= maxkpa)
			n = 0;		/* bank entirely above the mappable range */
		else if(n > 0 && m->base+n*PGSZ >= maxkpa)
			n = (maxkpa - m->base)/PGSZ;	/* clip to the mappable range */
		/* first give to kernel */
		if(n > 0){
			m->kbase = PTR2UINT(KADDR(m->base));
			m->klimit = PTR2UINT(KADDR(m->base+n*PGSZ));
			xhole(m->base, n*PGSZ);
			kpages -= n;
		}
		/* if anything left over, give to user */
		if(n < m->npage){
			if(pm >= palloc.mem+nelem(palloc.mem)){
				/* out of Pallocmem slots: pages are dropped */
				print("xinit: losing %lud pages\n", m->npage-n);
				continue;
			}
			pm->base = m->base+n*PGSZ;
			pm->npage = m->npage - n;
			pm++;
		}
	}
	xsummary();
}
/*
 * This is the counterpart of devzread in some sense,
 * it reads in the traditional way from io[].
 * Copies at most count bytes from the nio zero-copy chunks into a,
 * releasing every chunk (its address back to the segment, and the
 * segment reference) whether or not its data was copied.
 * Returns the number of bytes copied.
 */
int32_t
readzio(Kzio *io, int nio, void *a, int32_t count)
{
	int32_t tot, nr;
	char *p;

	p = a;
	tot = 0;
	while(nio-- > 0){
		if(tot < count){
			nr = io->size;
			if(tot + nr > count)
				nr = count - tot;	/* clamp to the caller's buffer */
			DBG("readzio: copy %#p %Z\n", p+tot, io);
			memmove(p+tot, io->data, nr);
			tot += nr;
		}
		/* chunk is consumed even when the buffer was already full */
		qlock(&io->seg->lk);
		zputaddr(io->seg, PTR2UINT(io->data));
		qunlock(&io->seg->lk);
		putseg(io->seg);
		io->seg = nil;
		io++;
	}
	return tot;
}
/*
 * Lay out the boot arguments on the new process's user stack page at
 * base, then point the global sp at the finished stack.
 */
static void
bootargs(uintptr base)
{
	int i;
	ulong ssize;
	char **av, *p;

	/*
	 * Push the boot args onto the stack.
	 * The initial value of the user stack must be such
	 * that the total used is larger than the maximum size
	 * of the argument list checked in syscall.
	 */
	i = oargblen+1;
	/* args go just below the Tos structure at the top of the page */
	p = UINT2PTR(STACKALIGN(base + BY2PG - sizeof(Tos) - i));
	memmove(p, oargb, i);

	/*
	 * Now push the argv pointers.
	 * The code jumped to by touser in lproc.s expects arguments
	 *	main(char* argv0, ...)
	 * and calls
	 *	startboot("/boot/boot", &argv0)
	 * not the usual (int argc, char* argv[])
	 */
	av = (char**)(p - (oargc+1)*sizeof(char*));
	ssize = base + BY2PG - PTR2UINT(av);
	for(i = 0; i < oargc; i++)
		/* relocate each pointer from kernel copy to user address */
		*av++ = (oargv[i] - oargb) + (p - base) + (USTKTOP - BY2PG);
	*av = nil;	/* argv is nil-terminated */
	sp = USTKTOP - ssize;
}
/**
 * Look a key up in a hashlist.
 *
 * @param hl a hashlist.
 * @param key the key to look for.
 * @param bin if not NULL, receives the bin number that is (or would be)
 *	used for the key, whether or not the key is present.
 * @return NULL if the key is not in the hashlist; otherwise the item
 *	associated with the key.
 */
static hash_item_t *
hashlist_find(hashlist_t *hl, const void *key, uint32_t *bin)
{
	hash_item_t *it;
	uint32_t h, slot;

	HASHLIST_CHECK(hl);

	/* no hash callback: hash the pointer value itself */
	h = hl->hash ? hl->hash(key) : (uint32_t) PTR2UINT(key);
	slot = (h ^ hl->rnd) % hl->num_bins;
	if (bin)
		*bin = slot;

	/* chain scan: compare via callback when present, else by identity */
	for (it = hl->bins[slot]; it != NULL; it = it->bnext) {
		if (hl->cmp) {
			if (hl->cmp(key, it->node.ptr))
				return it;
		} else if (key == it->node.ptr) {
			return it;
		}
	}
	return NULL;
}
/*
 * Undo a vmap().  Currently only handles (by ignoring) addresses in
 * the statically-mapped low region; everything else is a stub that
 * logs and returns without releasing resources.
 */
void
vunmap(void* v, usize size)
{
	Proc *up = externup();	/* NOTE(review): up is unused here */
	uintptr_t va;

	DBG("vunmap(%#p, %lud)\n", v, size);

	/* only CPU0 is expected to manage these mappings */
	if(machp()->machno != 0)
		panic("vunmap");

	/*
	 * See the comments above in vmap.
	 */
	va = PTR2UINT(v);
	/* within the permanently mapped first MiB above KZERO: nothing to do */
	if(va >= KZERO && va+size < KZERO+1ull*MiB)
		return;

	/*
	 * Here will have to deal with releasing any
	 * resources used for the allocation (e.g. page table
	 * pages).
	 */
	DBG("vunmap(%#p, %lud)\n", v, size);
}
/*
 * Allocate a Block able to hold size bytes of data plus Hdrspc bytes
 * of headroom for prepended protocol headers.  Layout within the
 * single malloc'd chunk: [data area ...][Block struct at the end].
 * Returns nil if malloc fails.
 */
static Block*
_allocb(int size)
{
	Block *b;
	uint8_t *p;
	int n;

	/* room for alignment slack + rounded data+headroom + the Block itself */
	n = BLOCKALIGN + ROUNDUP(size+Hdrspc, BLOCKALIGN) + sizeof(Block);
	if((p = malloc(n)) == nil)
		return nil;

	b = (Block*)(p + n - sizeof(Block));	/* block at end of allocated space */
	b->base = p;
	b->next = nil;
	b->list = nil;
	b->free = 0;
	b->flag = 0;

	/* align base and bounds of data */
	b->lim = (uint8_t*)(PTR2UINT(b) & ~(BLOCKALIGN-1));

	/* align start of writable data, leaving space below for added headers */
	b->rp = b->lim - ROUNDUP(size, BLOCKALIGN);
	b->wp = b->rp;

	/* sanity: data window must fit between base and lim */
	if(b->rp < b->base || b->lim - b->rp < size)
		panic("_allocb");

	return b;
}
/*
 * Mark one MiB Section, already mapped in the L1 page table, as
 * uncached/unbuffered.  Returns v on success, nil if the address is
 * not mapped as a Section.
 */
void*
mmuuncache(void* v, usize size)
{
	int x;
	PTE *pte;
	uintptr va;

	/*
	 * Simple helper for ucalloc().
	 * Uncache a Section, must already be
	 * valid in the MMU.
	 */
	va = PTR2UINT(v);
	/* must be exactly one Section: MiB-aligned, MiB-sized */
	assert(!(va & (1*MiB-1)) && size == 1*MiB);

	x = L1X(va);
	pte = &m->mmul1[x];
	if((*pte & (Fine|Section|Coarse)) != Section)
		return nil;

	*pte &= ~(Cached|Buffered);
	mmuinvalidateaddr(va);		/* drop the stale TLB entry */
	cachedwbinvse(pte, 4);		/* push the PTE change to memory */

	return v;
}
/*
 * Install proc's address space on this CPU: rebuild the per-CPU PML4
 * user entries from the process's top-level page-table pages, set the
 * kernel stack in the TSS, and reload CR3.
 */
void
mmuswitch(Proc* proc)
{
	PTE *pte;
	Page *page;
	Mpl pl;

	pl = splhi();
	if(proc->newtlb) {
		/*
		 * NIX: We cannot clear our page tables if they are going to
		 * be used in the AC
		 */
		if(proc->ac == nil)
			mmuptpfree(proc, 1);
		proc->newtlb = 0;
	}

	/* daddr tracks how many user PML4 slots are in use; wipe them */
	if(machp()->MMU.pml4->daddr) {
		memset(UINT2PTR(machp()->MMU.pml4->va), 0, machp()->MMU.pml4->daddr*sizeof(PTE));
		machp()->MMU.pml4->daddr = 0;
	}

	/* copy the process's level-3 page pointers into the CPU's PML4 */
	pte = UINT2PTR(machp()->MMU.pml4->va);
	for(page = proc->MMU.mmuptp[3]; page != nil; page = page->next) {
		pte[page->daddr] = PPN(page->pa)|PteU|PteRW|PteP;
		if(page->daddr >= machp()->MMU.pml4->daddr)
			machp()->MMU.pml4->daddr = page->daddr+1;
		page->prev = machp()->MMU.pml4;
	}

	tssrsp0(machp(), STACKALIGN(PTR2UINT(proc->kstack+KSTACK)));
	cr3put(machp()->MMU.pml4->pa);	/* activate the new address space */
	splx(pl);
}
/*
 * Lay out the boot arguments (argc, argv pointers, and the argument
 * strings) on the user stack page at base, then set the global sp.
 */
void
bootargs(uintptr base)
{
	int i;
	ulong ssize;
	char **av, *p;

	/*
	 * Push the boot args onto the stack.
	 * Make sure the validaddr check in syscall won't fail
	 * because there are fewer than the maximum number of
	 * args by subtracting sizeof(up->arg).
	 */
	i = oargblen+1;
	p = UINT2PTR(STACKALIGN(base + BIGPGSZ - sizeof(up->arg) - i));
	memmove(p, oargb, i);

	/*
	 * Now push argc and the argv pointers.
	 * This isn't strictly correct as the code jumped to by
	 * touser in init9.[cs] calls startboot (port/initcode.c) which
	 * expects arguments
	 *	startboot(char* argv0, char* argv[])
	 * not the usual (int argc, char* argv[]), but argv0 is
	 * unused so it doesn't matter (at the moment...).
	 */
	av = (char**)(p - (oargc+2)*sizeof(char*));
	ssize = base + BIGPGSZ - PTR2UINT(av);
	*av++ = (char*)oargc;	/* argc is smuggled in the first slot */
	for(i = 0; i < oargc; i++)
		/* relocate each pointer from the kernel copy to its user address */
		*av++ = (oargv[i] - oargb) + (p - base) + (USTKTOP - BIGPGSZ);
	*av = nil;	/* argv is nil-terminated */
	sp = USTKTOP - ssize;
}
/*
 * Allocate size bytes aligned to align and not crossing a span
 * boundary.  Over-allocates by align+span, returns the unused head
 * and tail to the hole list, and panics if xalloc fails.
 * NOTE(review): the rounding below assumes align and span are powers
 * of two — confirm against callers.
 */
void*
xspanalloc(ulong size, int align, ulong span)
{
	uintptr a, v, t;

	a = PTR2UINT(xalloc(size+align+span));
	if(a == 0)
		panic("xspanalloc: %lud %d %lux\n", size, align, span);

	if(span > 2) {
		/* round up to the next span boundary */
		v = (a + span) & ~(span-1);
		t = v - a;
		if(t > 0)
			xhole(PADDR(UINT2PTR(a)), t);	/* return slack before the span */
		t = a + span - v;
		if(t > 0)
			xhole(PADDR(UINT2PTR(v+size+align)), t);	/* return slack after */
	}
	else
		v = a;

	if(align > 1)
		v = (v + align) & ~(align-1);	/* round up to alignment */

	return (void*)v;
}
/*
 * One's-complement checksum over len bytes at addr, as used by the IP
 * protocols.  Odd-address and odd-length cases are handled by
 * accumulating stray bytes into hisum/losum and swapping on LITTLE
 * endian hosts; the main loop is unrolled to sum 16 bytes per pass.
 */
ushort
ptclbsum(uchar *addr, int len)
{
	ulong losum, hisum, mdsum, x;
	ulong t1, t2;

	losum = 0;
	hisum = 0;
	mdsum = 0;

	x = 0;
	/* misaligned start: fold the first byte in and remember the skew */
	if(PTR2UINT(addr) & 1) {
		if(len) {
			hisum += addr[0];
			len--;
			addr++;
		}
		x = 1;
	}
	/* unrolled: sum eight 16-bit words per iteration */
	while(len >= 16) {
		t1 = *(ushort*)(addr+0);
		t2 = *(ushort*)(addr+2);	mdsum += t1;
		t1 = *(ushort*)(addr+4);	mdsum += t2;
		t2 = *(ushort*)(addr+6);	mdsum += t1;
		t1 = *(ushort*)(addr+8);	mdsum += t2;
		t2 = *(ushort*)(addr+10);	mdsum += t1;
		t1 = *(ushort*)(addr+12);	mdsum += t2;
		t2 = *(ushort*)(addr+14);	mdsum += t1;
		mdsum += t2;
		len -= 16;
		addr += 16;
	}
	while(len >= 2) {
		mdsum += *(ushort*)addr;
		len -= 2;
		addr += 2;
	}
	/* trailing odd byte, placed per the skew and host byte order */
	if(x) {
		if(len)
			losum += addr[0];
		if(LITTLE)
			losum += mdsum;
		else
			hisum += mdsum;
	}
	else {
		if(len)
			hisum += addr[0];
		if(LITTLE)
			hisum += mdsum;
		else
			losum += mdsum;
	}

	/* merge high byte-lane into low, then fold carries */
	losum += hisum >> 8;
	losum += (hisum & 0xff) << 8;
	while(hisum = losum>>16)
		losum = hisum + (losum & 0xffff);

	return losum & 0xffff;
}
/* Give enough context in the ureg to produce a kernel stack for
 * a sleeping process */
void
setkernur(Ureg* ureg, Proc* p)
{
	ureg->pc = p->sched.pc;
	/* NOTE(review): +4 presumably skips the saved word below the
	 * scheduling sp (see gotolabel) — confirm against the arch port */
	ureg->sp = p->sched.sp+4;
	/* link register: stack trace of a sleeping proc bottoms out in sched() */
	ureg->r14 = PTR2UINT(sched);
}
/*
 * Final register setup for exec: point the user registers at the new
 * entry point and stack, and return the address of the kernel/user
 * shared Tos area.
 */
void*
sysexecregs(uintptr_t entry, uint32_t ssize, void *tos)
{
	Proc *up = externup();
	uintptr_t *sp;
	Ureg *ureg;

	// We made sure it was correctly aligned in sysexecstack, above.
	if (ssize & 0xf) {
		print("your stack is wrong: stacksize is not 16-byte aligned: %d\n", ssize);
		panic("misaligned stack in sysexecregs");
	}
	sp = (uintptr_t*)(USTKTOP - ssize);

	ureg = up->dbgreg;
	ureg->sp = PTR2UINT(sp);
	ureg->ip = entry;
	ureg->type = 64;			/* fiction for acid */
	ureg->dx = (uintptr_t)tos;	/* Tos pointer handed to user code in dx */

	/*
	 * return the address of kernel/user shared data
	 * (e.g. clock stuff)
	 */
	return UINT2PTR(USTKTOP-sizeof(Tos));
}
/*
 * Remove entryPtr from its hash table's bucket chain and free it.
 * The bucket is found either via the stored hash (when
 * TCL_HASH_KEY_STORE_HASH is set) or via the entry's back-pointer.
 */
void
Tcl_DeleteHashEntry(
    Tcl_HashEntry *entryPtr)
{
    register Tcl_HashEntry *prevPtr;
    const Tcl_HashKeyType *typePtr;
    Tcl_HashTable *tablePtr;
    Tcl_HashEntry **bucketPtr;
#if TCL_HASH_KEY_STORE_HASH
    int index;
#endif

    tablePtr = entryPtr->tablePtr;

    /* select the key-type descriptor matching the table's key kind */
    if (tablePtr->keyType == TCL_STRING_KEYS) {
	typePtr = &tclStringHashKeyType;
    } else if (tablePtr->keyType == TCL_ONE_WORD_KEYS) {
	typePtr = &tclOneWordHashKeyType;
    } else if (tablePtr->keyType == TCL_CUSTOM_TYPE_KEYS
	    || tablePtr->keyType == TCL_CUSTOM_PTR_KEYS) {
	typePtr = tablePtr->typePtr;
    } else {
	typePtr = &tclArrayHashKeyType;
    }

#if TCL_HASH_KEY_STORE_HASH
    /* recompute the bucket index from the stored hash value */
    if (typePtr->hashKeyProc == NULL
	    || typePtr->flags & TCL_HASH_KEY_RANDOMIZE_HASH) {
	index = RANDOM_INDEX (tablePtr, entryPtr->hash);
    } else {
	index = PTR2UINT(entryPtr->hash) & tablePtr->mask;
    }

    bucketPtr = &(tablePtr->buckets[index]);
#else
    bucketPtr = entryPtr->bucketPtr;
#endif

    /* unlink the entry from the singly-linked bucket chain */
    if (*bucketPtr == entryPtr) {
	*bucketPtr = entryPtr->nextPtr;
    } else {
	for (prevPtr = *bucketPtr; ; prevPtr = prevPtr->nextPtr) {
	    if (prevPtr == NULL) {
		Tcl_Panic("malformed bucket chain in Tcl_DeleteHashEntry");
	    }
	    if (prevPtr->nextPtr == entryPtr) {
		prevPtr->nextPtr = entryPtr->nextPtr;
		break;
	    }
	}
    }

    tablePtr->numEntries--;
    /* key types with their own allocator free the entry themselves */
    if (typePtr->freeEntryProc) {
	typePtr->freeEntryProc (entryPtr);
    } else {
	ckfree((char *) entryPtr);
    }
}
/*
 * Fill in enough of Ureg to get a stack trace, and call a function.
 * Used by debugging interface rdb.
 * The &fn trick: the address of the first argument sits at a known
 * spot in this frame, so it doubles as an approximate stack pointer,
 * and getcallerpc(&fn) recovers our caller's PC from the same frame.
 */
void
callwithureg(void (*fn)(Ureg*))
{
	Ureg ureg;
	ureg.ip = getcallerpc(&fn);
	ureg.sp = PTR2UINT(&fn);
	fn(&ureg);
}
/*
 * Tk_RestrictProc that lets through only Expose events whose serial
 * number matches the serial packed into the ClientData argument; all
 * other events are deferred.
 */
static Tk_RestrictAction
ExposeRestrictProc(
    ClientData arg,
    XEvent *eventPtr)
{
    if (eventPtr->type == Expose && eventPtr->xany.serial == PTR2UINT(arg)) {
	return TK_PROCESS_EVENT;
    }
    return TK_DEFER_EVENT;
}
/*
 * Zero-copy write: push the nio chunks in io[] to the device, either
 * directly (single chunk) or gathered into one Block, returning each
 * chunk's address to its segment along the way.  On return the io[]
 * sizes reflect how much of each chunk was written, and every entry's
 * data/seg fields are cleared.
 */
int
devzwrite(Chan *c, Kzio io[], int nio, int64_t offset)
{
	Mach *m = machp();
	int i, j;
	int32_t tot;
	Block *bp;

	DBG("devzwrite %#p[%d]\n", io, nio);

	tot = 0;
	for(i = 0; i < nio; i++)
		tot += io[i].size;
	bp = nil;
	if(waserror()){
		if(bp != nil)
			freeb(bp);
		nexterror();
	}
	if(nio == 1)
		tot = c->dev->write(c, io[0].data, io[0].size, offset);
	else{
		/* gather all chunks into a single Block for one bwrite */
		bp = allocb(tot);
		if(bp == nil)
			error(Enomem);
		for(i = 0; i < nio; i++){
			DBG("devzwrite: copy %#p %Z\n", bp->wp, &io[i]);
			memmove(bp->wp, io[i].data, io[i].size);
			bp->wp += io[i].size;
			qlock(&io[i].seg->lk);
			if(zputaddr(io[i].seg, PTR2UINT(io[i].data)) < 0)
				panic("devzwrite: not a shared data segment");
			qunlock(&io[i].seg->lk);
		}
		tot = c->dev->bwrite(c, bp, offset);
	}
	j = 0;
	for(i = 0; i < nio; i++){
		/* charge the bytes written against each chunk in order */
		if(tot > 0){
			if(tot >= io[i].size)
				tot -= io[i].size;
			else
				io[i].size = tot;
		}else{
			j = i;
			io[i].size = 0;
		}
		/*
		 * Release the segment reference BEFORE clearing the
		 * pointer.  The previous code nil'ed io[i].seg first and
		 * then called putseg(nil) — twice — leaking one segment
		 * reference per entry.
		 */
		io[i].data = nil;	/* safety */
		putseg(io[i].seg);
		io[i].seg = nil;
	}
	nio = j;
	poperror();
	return nio;
}
/*
 * Generate accessor stubs for every Device Control Register: for each
 * DCR number, emit a two-instruction mfdcr/return (into _getdcr) and
 * mtdcr/return (into _putdcr), then flush the data cache so the
 * freshly written instructions are visible for execution.
 */
void
dcrcompile(void)
{
	ulong *p;
	int i;

	for(i=0; i<MAXDCR; i++){
		/* getter: move from DCR i into register 3, then return */
		p = _getdcr[i];
		p[0] = MFDCR(i, 3);
		p[1] = RETURN;
		/* setter: move register 3 into DCR i, then return */
		p = _putdcr[i];
		p[0] = MTDCR(3, i);
		p[1] = RETURN;
	}
	dcflush(PTR2UINT(_getdcr), sizeof(_getdcr));
	dcflush(PTR2UINT(_putdcr), sizeof(_putdcr));
	/* no need to flush icache since they won't be there */
}
int xmerge(void *vp, void *vq) { Xhdr *p, *q; p = UINT2PTR((PTR2UINT(vp) - offsetof(Xhdr, data[0]))); q = UINT2PTR((PTR2UINT(vq) - offsetof(Xhdr, data[0]))); if(p->magix != Magichole || q->magix != Magichole) { xsummary(); panic("xmerge(%#p, %#p) bad magic %#lux, %#lux\n", vp, vq, p->magix, q->magix); } if((uchar*)p+p->size == (uchar*)q) { p->size += q->size; return 1; } return 0; }
/*
 * Return an xalloc'd chunk to the hole list.  Recovers the Xhdr that
 * precedes the data pointer, verifies its magic number (panicking,
 * after a summary dump, on corruption), and hands the physical span
 * back via xhole().
 */
void
xfree(void *p)
{
	Xhdr *hdr;

	/* step back from the data pointer to the chunk header */
	hdr = UINT2PTR(PTR2UINT(p) - offsetof(Xhdr, data[0]));
	if(hdr->magix != Magichole) {
		xsummary();
		panic("xfree(%#p) %#ux != %#lux", p, Magichole, hdr->magix);
	}
	xhole(PADDR(hdr), hdr->size);
}
/*
 * System call: semwakeup(int*).
 * Validates the user semaphore word, finds (or creates) the kernel
 * semaphore backing it in the word's segment, and wakes one waiter.
 */
void
syssemwakeup(Ar0* ar0, ...)
{
	Proc *up = externup();
	int *np;
	Sem *s;
	Segment *sg;
	va_list list;
	va_start(list, ar0);

	/*
	 * void semwakeup(int*);
	 */
	np = va_arg(list, int*);

	np = validaddr(np, sizeof *np, 1);
	evenaddr(PTR2UINT(np));	/* semaphore word must be aligned */
	if((sg = seg(up, PTR2UINT(np), 0)) == nil)
		error(Ebadarg);
	s = segmksem(sg, np);
	semwakeup(s, 1, 1);
	va_end(list);
}
/*
 * Finish setting up a child created by rfork: build its kernel
 * scheduling context so it resumes in sysrforkret with a copy of the
 * parent's user registers on its own kernel stack.
 */
void
sysrforkchild(Proc* child, Proc* parent)
{
	Ureg *cureg;

	/*
	 * Add 3*BY2SE to the stack to account for
	 *  - the return PC
	 *  - trap's arguments (syscallnr, ureg)
	 */
	child->sched.sp = PTR2UINT(child->kstack+KSTACK-(sizeof(Ureg)+3*BY2SE));
	child->sched.pc = PTR2UINT(sysrforkret);

	/* the Ureg copy sits just above the three reserved slots */
	cureg = (Ureg*)(child->sched.sp+3*BY2SE);
	memmove(cureg, parent->dbgreg, sizeof(Ureg));

	/* Things from bottom of syscall which were never executed */
	child->psstate = 0;
	child->insyscall = 0;

	fpusysrforkchild(child, parent);
}
/*
 * Translate a kernel virtual address to a physical address.
 * Handles the two kernel windows (KSEG0 up to TMFM, and KSEG2);
 * panics on anything else.
 */
uintmem
PADDR(void* va)
{
	uintmem pa;

	pa = PTR2UINT(va);
	if(pa >= KSEG0 && pa < KSEG0+TMFM)
		return pa-KSEG0;
	if(pa > KSEG2)
		return pa-KSEG2;

	/* fixed format string: "#%p" printed a stray '#'; %#p was intended */
	panic("PADDR: va %#p pa %#p @ %#p\n", va, _PADDR(va), getcallerpc());
	return 0;
}
/*
 * Allocate a page-table page wrapped in a Page structure: reuse one
 * from the free list when possible, otherwise malloc a new Page plus
 * a PTSZ-aligned backing page.  Returns nil on allocation failure.
 */
static Page*
mmuptpalloc(void)
{
	void* va;
	Page *page;

	/*
	 * Do not really need a whole Page structure,
	 * but it makes testing this out a lot easier.
	 * Could keep a cache and free excess.
	 * Have to maintain any fiction for pexit?
	 */
	lock(&mmuptpfreelist.l);
	if((page = mmuptpfreelist.next) != nil) {
		/* pop the head of the free list */
		mmuptpfreelist.next = page->next;
		mmuptpfreelist.ref--;
		unlock(&mmuptpfreelist.l);

		if(page->ref++ != 0)
			panic("mmuptpalloc ref\n");	/* free-list pages must be unreferenced */
		page->prev = page->next = nil;
		memset(UINT2PTR(page->va), 0, PTSZ);	/* recycled table must start empty */

		if(page->pa == 0)
			panic("mmuptpalloc: free page with pa == 0");
		return page;
	}
	unlock(&mmuptpfreelist.l);

	if((page = malloc(sizeof(Page))) == nil) {
		print("mmuptpalloc Page\n");

		return nil;
	}
	/* page tables must be PTSZ-aligned */
	if((va = mallocalign(PTSZ, PTSZ, 0, 0)) == nil) {
		print("mmuptpalloc va\n");
		free(page);

		return nil;
	}

	page->va = PTR2UINT(va);
	page->pa = PADDR(va);
	page->ref = 1;

	if(page->pa == 0)
		panic("mmuptpalloc: no pa");
	return page;
}