int main(void)
{
  PRINTSTR("\n" POS_STARTUPSTRING "\n\n");

  /* start multitasking and execute first task (task1) */
#if (POSCFG_TASKSTACKTYPE == 0)
  posInit(task1, NULL, 1, allocstack(0x0600), allocstack(0x0200));
#elif (POSCFG_TASKSTACKTYPE == 1)
  posInit(task1, NULL, 1, 0x0600, 0x0200);
#elif (POSCFG_TASKSTACKTYPE == 2)
  posInit(task1, NULL, 1);
#endif

  /* we will never get here */
  return 0;
}
/*-------------------------------------------------------------------------*/

static POSTASK_t startTask(POSTASKFUNC_t fp, void *arg, UVAR_t prio)
{
#if (POSCFG_TASKSTACKTYPE == 0)
  return posTaskCreate(fp, arg, prio, allocstack(0x0600));
#elif (POSCFG_TASKSTACKTYPE == 1)
  return posTaskCreate(fp, arg, prio, 0x0600);
#elif (POSCFG_TASKSTACKTYPE == 2)
  return posTaskCreate(fp, arg, prio);
#endif
}
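For context, here is a minimal sketch (not part of the original example) of a task body that could be launched through the startTask() helper above; it assumes posTaskExit() is enabled in the pico]OS configuration and that the example's PRINTSTR macro is available:

/* Hypothetical worker task: prints a message, then removes itself
   from the scheduler via posTaskExit(). */
static void workerTask(void *arg)
{
  (void) arg;                          /* argument unused here */
  PRINTSTR("worker task running\n");
  posTaskExit();                       /* terminate this task */
}

/* usage, e.g. from inside task1:  startTask(workerTask, NULL, 2); */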
// Edit the thread state to send to the real kernel.
// The real thread will run start_thread_NORETURN(tst)
// on a separate non-client stack.
void hijack_thread_state(thread_state_t mach_generic,
                         thread_state_flavor_t flavor,
                         mach_msg_type_number_t count,
                         ThreadState *tst)
{
   x86_thread_state64_t *mach = (x86_thread_state64_t *)mach_generic;
   char *stack;

   vg_assert(flavor == x86_THREAD_STATE64);
   vg_assert(count == x86_THREAD_STATE64_COUNT);

   stack = (char *)allocstack(tst->tid);

   stack -= 64+320;                       // make room for top frame
   memset(stack, 0, 64+320);              // ...and clear it
   *(uintptr_t *)stack = 0;               // push fake return address

   mach->__rdi = (uintptr_t)tst;          // arg1 = tst
   mach->__rip = (uintptr_t)&start_thread_NORETURN;
   mach->__rsp = (uintptr_t)stack;
}
/* wqthread note: The kernel may create or destroy pthreads in the
   wqthread pool at any time with no userspace interaction,
   and wqthread_start may be entered at any time with no userspace
   interaction.
   To handle this in valgrind, we create and destroy a valgrind
   thread for every work item.
*/
void wqthread_hijack(Addr self, Addr kport, Addr stackaddr, Addr workitem,
                     Int reuse, Addr sp)
{
   ThreadState *tst;
   VexGuestAMD64State *vex;
   Addr stack;
   SizeT stacksize;
   vki_sigset_t blockall;

   /* When we enter here we hold no lock (!), so we better acquire it
      pronto.  Why do we hold no lock?  Because (presumably) the only way
      to get here is as a result of a SfMayBlock syscall
      "workq_ops(WQOPS_THREAD_RETURN)", which will have dropped the lock.
      At least that's clear for the 'reuse' case.  The non-reuse case?
      Dunno, perhaps it's a new thread the kernel pulled out of a hat.
      In any case we still need to take a lock. */
   VG_(acquire_BigLock_LL)("wqthread_hijack");

   if (0) VG_(printf)(
             "wqthread_hijack: self %#lx, kport %#lx, "
             "stackaddr %#lx, workitem %#lx, reuse/flags %x, sp %#lx\n",
             self, kport, stackaddr, workitem, reuse, sp);

   /* Start the thread with all signals blocked.  VG_(scheduler) will
      set the mask correctly when we finally get there. */
   VG_(sigfillset)(&blockall);
   VG_(sigprocmask)(VKI_SIG_SETMASK, &blockall, NULL);

   /* For 10.7 and earlier, |reuse| appeared to be used as a simple
      boolean.  In 10.8 and later its name changed to |flags| and has
      various other bits OR-d into it too, so it's necessary to fish
      out just the relevant parts.  Hence: */
#  if DARWIN_VERS <= DARWIN_10_7
   Bool is_reuse = reuse != 0;
#  elif DARWIN_VERS == DARWIN_10_8 || DARWIN_VERS == DARWIN_10_9
   Bool is_reuse = (reuse & 0x20000 /* == WQ_FLAG_THREAD_REUSE */) != 0;
#  endif

   if (is_reuse) {

      /* For whatever reason, tst->os_state.pthread appear to have a
         constant offset of 96 on 10.7, but zero on 10.6 and 10.5.  No
         idea why. */
#     if DARWIN_VERS <= DARWIN_10_6
      UWord magic_delta = 0;
#     elif DARWIN_VERS == DARWIN_10_7 || DARWIN_VERS == DARWIN_10_8
      UWord magic_delta = 0x60;
#     elif DARWIN_VERS == DARWIN_10_9
      UWord magic_delta = 0xE0;
#     else
#       error "magic_delta: to be computed on new OS version"
        // magic_delta = tst->os_state.pthread - self
#     endif

      // This thread already exists; we're merely re-entering
      // after leaving via workq_ops(WQOPS_THREAD_RETURN).
      // Don't allocate any V thread resources.
      // Do reset thread registers.
      ThreadId tid = VG_(lwpid_to_vgtid)(kport);
      vg_assert(VG_(is_valid_tid)(tid));
      vg_assert(mach_thread_self() == kport);

      tst = VG_(get_ThreadState)(tid);

      if (0) VG_(printf)("wqthread_hijack reuse %s: tid %d, tst %p, "
                         "tst->os_state.pthread %#lx\n",
                         tst->os_state.pthread == self ? "SAME" : "DIFF",
                         tid, tst, tst->os_state.pthread);

      vex = &tst->arch.vex;
      vg_assert(tst->os_state.pthread - magic_delta == self);
   }
   else {
      // This is a new thread.
      tst = VG_(get_ThreadState)(VG_(alloc_ThreadState)());
      vex = &tst->arch.vex;
      allocstack(tst->tid);
      LibVEX_GuestAMD64_initialise(vex);
   }

   // Set thread's registers
   // Do this FIRST because some code below tries to collect a backtrace,
   // which requires valid register data.
   vex->guest_RIP = wqthread_starter;
   vex->guest_RDI = self;
   vex->guest_RSI = kport;
   vex->guest_RDX = stackaddr;
   vex->guest_RCX = workitem;
   vex->guest_R8  = reuse;
   vex->guest_R9  = 0;
   vex->guest_RSP = sp;

   stacksize = 512*1024;  // wq stacks are always DEFAULT_STACK_SIZE
   stack = VG_PGROUNDUP(sp) - stacksize;

   if (is_reuse) {
      // Continue V's thread back in the scheduler.
      // The client thread is of course in another location entirely.

      /* Drop the lock before going into
         ML_(wqthread_continue_NORETURN).  The latter will immediately
         attempt to reacquire it in non-LL mode, which is a bit
         wasteful but I don't think is harmful.  A better solution
         would be to not drop the lock but instead "upgrade" it from a
         LL lock to a full lock, but that's too much like hard work
         right now. */
      VG_(release_BigLock_LL)("wqthread_hijack(1)");
      ML_(wqthread_continue_NORETURN)(tst->tid);
   }
   else {
      // Record thread's stack and Mach port and pthread struct
      tst->os_state.pthread = self;
      tst->os_state.lwpid = kport;
      record_named_port(tst->tid, kport, MACH_PORT_RIGHT_SEND, "wqthread-%p");

      // kernel allocated stack - needs mapping
      tst->client_stack_highest_word = stack+stacksize;
      tst->client_stack_szB = stacksize;

      // GrP fixme scheduler lock?!

      // pthread structure
      ML_(notify_core_and_tool_of_mmap)(
            stack+stacksize, pthread_structsize,
            VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
      // stack contents
      // GrP fixme uninitialized!
      ML_(notify_core_and_tool_of_mmap)(
            stack, stacksize,
            VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
      // guard page
      // GrP fixme ban_mem_stack!
      ML_(notify_core_and_tool_of_mmap)(
            stack-VKI_PAGE_SIZE, VKI_PAGE_SIZE,
            0, VKI_MAP_PRIVATE, -1, 0);

      ML_(sync_mappings)("after", "wqthread_hijack", 0);

      // Go!
      /* Same comments as the 'release' in the then-clause.
         start_thread_NORETURN calls run_thread_NORETURN calls
         thread_wrapper which acquires the lock before continuing.
         Let's hope nothing non-thread-local happens until that point.

         DDD: I think this is plain wrong .. if we get to
         thread_wrapper not holding the lock, and someone has recycled
         this thread slot in the meantime, we're hosed.  Is that
         possible, though? */
      VG_(release_BigLock_LL)("wqthread_hijack(2)");
      call_on_new_stack_0_1(tst->os_state.valgrind_stack_init_SP, 0,
                            start_thread_NORETURN, (Word)tst);
   }

   /*NOTREACHED*/
   vg_assert(0);
}
/* wqthread note: The kernel may create or destroy pthreads in the
   wqthread pool at any time with no userspace interaction,
   and wqthread_start may be entered at any time with no userspace
   interaction.
   To handle this in valgrind, we create and destroy a valgrind
   thread for every work item.
*/
void wqthread_hijack(Addr self, Addr kport, Addr stackaddr, Addr workitem,
                     Int reuse, Addr sp)
{
   ThreadState *tst;
   VexGuestX86State *vex;
   Addr stack;
   SizeT stacksize;
   vki_sigset_t blockall;

   /* When we enter here we hold no lock (!), so we better acquire it
      pronto.  Why do we hold no lock?  Because (presumably) the only way
      to get here is as a result of a SfMayBlock syscall
      "workq_ops(WQOPS_THREAD_RETURN)", which will have dropped the lock.
      At least that's clear for the 'reuse' case.  The non-reuse case?
      Dunno, perhaps it's a new thread the kernel pulled out of a hat.
      In any case we still need to take a lock. */
   VG_(acquire_BigLock_LL)("wqthread_hijack");

   /* Start the thread with all signals blocked.  VG_(scheduler) will
      set the mask correctly when we finally get there. */
   VG_(sigfillset)(&blockall);
   VG_(sigprocmask)(VKI_SIG_SETMASK, &blockall, NULL);

   if (reuse) {
      // This thread already exists; we're merely re-entering
      // after leaving via workq_ops(WQOPS_THREAD_RETURN).
      // Don't allocate any V thread resources.
      // Do reset thread registers.
      ThreadId tid = VG_(lwpid_to_vgtid)(kport);
      vg_assert(VG_(is_valid_tid)(tid));
      vg_assert(mach_thread_self() == kport);

      tst = VG_(get_ThreadState)(tid);
      vex = &tst->arch.vex;
      vg_assert(tst->os_state.pthread == self);
   }
   else {
      // This is a new thread.
      tst = VG_(get_ThreadState)(VG_(alloc_ThreadState)());
      vex = &tst->arch.vex;
      allocstack(tst->tid);
      LibVEX_GuestX86_initialise(vex);
   }

   // Set thread's registers
   // Do this FIRST because some code below tries to collect a backtrace,
   // which requires valid register data.
   vex->guest_EIP = wqthread_starter;
   vex->guest_EAX = self;
   vex->guest_EBX = kport;
   vex->guest_ECX = stackaddr;
   vex->guest_EDX = workitem;
   vex->guest_EDI = reuse;
   vex->guest_ESI = 0;
   vex->guest_ESP = sp;

   stacksize = 512*1024;  // wq stacks are always DEFAULT_STACK_SIZE
   stack = VG_PGROUNDUP(sp) - stacksize;

   if (reuse) {
      // Continue V's thread back in the scheduler.
      // The client thread is of course in another location entirely.

      /* Drop the lock before going into
         ML_(wqthread_continue_NORETURN).  The latter will immediately
         attempt to reacquire it in non-LL mode, which is a bit
         wasteful but I don't think is harmful.  A better solution
         would be to not drop the lock but instead "upgrade" it from a
         LL lock to a full lock, but that's too much like hard work
         right now. */
      VG_(release_BigLock_LL)("wqthread_hijack(1)");
      ML_(wqthread_continue_NORETURN)(tst->tid);
   }
   else {
      // Record thread's stack and Mach port and pthread struct
      tst->os_state.pthread = self;
      tst->os_state.lwpid = kport;
      record_named_port(tst->tid, kport, MACH_PORT_RIGHT_SEND, "wqthread-%p");

      // kernel allocated stack - needs mapping
      tst->client_stack_highest_word = stack+stacksize;
      tst->client_stack_szB = stacksize;

      // GrP fixme scheduler lock?!

      // pthread structure
      ML_(notify_core_and_tool_of_mmap)(
            stack+stacksize, pthread_structsize,
            VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
      // stack contents
      // GrP fixme uninitialized!
      ML_(notify_core_and_tool_of_mmap)(
            stack, stacksize,
            VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
      // guard page
      // GrP fixme ban_mem_stack!
      ML_(notify_core_and_tool_of_mmap)(
            stack-VKI_PAGE_SIZE, VKI_PAGE_SIZE,
            0, VKI_MAP_PRIVATE, -1, 0);

      ML_(sync_mappings)("after", "wqthread_hijack", 0);

      // Go!
      /* Same comments as the 'release' in the then-clause.
         start_thread_NORETURN calls run_thread_NORETURN calls
         thread_wrapper which acquires the lock before continuing.
         Let's hope nothing non-thread-local happens until that point.

         DDD: I think this is plain wrong .. if we get to
         thread_wrapper not holding the lock, and someone has recycled
         this thread slot in the meantime, we're hosed.  Is that
         possible, though? */
      VG_(release_BigLock_LL)("wqthread_hijack(2)");
      call_on_new_stack_0_1(tst->os_state.valgrind_stack_init_SP, 0,
                            start_thread_NORETURN, (Word)tst);
   }

   /*NOTREACHED*/
   vg_assert(0);
}
void allocate(int datareg, int addreg, int floatreg, SNODE *block)
/*
 * allocate will allocate registers for the expressions that have
 * a high enough desirability.
 */
{
    CSE *csp;
    ENODE *exptr;
    unsigned mask, rmask, i, fmask, frmask, size;
    AMODE *ap, *ap2;

    framedepth = 4 + lc_maxauto;
    mask = 0;
    rmask = 0;
    fmask = frmask = 0;
    for (i = cf_freedata; i < datareg; i++) {
        framedepth += 4;
        rmask = rmask | (1 << (15 - i));
        mask = mask | (1 << i);
    }
    for (i = cf_freeaddress + 16; i < addreg; i++) {
        framedepth += 4;
        rmask = rmask | (1 << (23 - i));
        mask = mask | (1 << (i - 8));
    }
    while (bsort(&olist))
        ;   /* sort the expression list */
    csp = olist;
    while (csp != 0) {
        if (csp->reg == -1 && !(csp->exp->cflags & DF_VOL) && !csp->voidf) {
            if (desire(csp) < 3)
                csp->reg = -1;
            else {
                if (csp->exp->nodetype == en_rcon ||
                    csp->exp->nodetype == en_fcon ||
                    csp->exp->nodetype == en_lrcon ||
                    csp->exp->nodetype == en_floatref ||
                    csp->exp->nodetype == en_doubleref ||
                    csp->exp->nodetype == en_longdoubleref) {
                    if (floatreg < 24 && floatregs)
                        csp->reg = floatreg++;
                }
                else if ((datareg < cf_maxdata) && (csp->duses <= csp->uses / 4)
                         && dataregs)
                    csp->reg = (datareg)++;
                else if (!(csp->size == 1 || csp->size == -1) &&
                         (addreg < cf_maxaddress) && addrregs)
                    csp->reg = (addreg)++;
            }
        }
        if (csp->reg != -1) {
            if (lvalue(csp->exp) && !((SYM *)csp->exp->v.p[0]->v.p[0])->funcparm) {
                ((SYM *)csp->exp->v.p[0]->v.p[0])->inreg = TRUE;
                ((SYM *)csp->exp->v.p[0]->v.p[0])->value.i = -csp->reg;
            }
            if (csp->reg < 16) {
                framedepth += 4;
                rmask = rmask | (1 << (15 - csp->reg));
                mask = mask | (1 << csp->reg);
            }
            else if (csp->reg < 32) {
                framedepth += 4;
                rmask = rmask | (1 << (23 - csp->reg));
                mask = mask | (1 << (csp->reg - 8));
            }
            else {
                framedepth += 12;
                frmask = frmask | (1 << (39 - csp->reg));
                fmask = fmask | (1 << (csp->reg - 32));
            }
        }
        csp = csp->next;
    }
    allocstack();       /* Allocate stack space for the local vars */
    if (currentfunc->tp->lst.head != 0 && currentfunc->tp->lst.head != (SYM *)-1) {
        if (prm_phiform || currentfunc->intflag) {
            mask |= (1 << (linkreg + 8));
            rmask |= (1 << (15 - linkreg - 8));
            framedepth += 4;
        }
        if (currentfunc->intflag) {
            mask |= 0xffff;
            rmask |= 0xffff;
            framedepth = lc_maxauto;
        }
    }
    if (prm_linkreg && !currentfunc->intflag &&
        (currentfunc->tp->lst.head && currentfunc->tp->lst.head != (SYM *)-1
         || lc_maxauto)) {
        gen_code(op_link, 0, makeareg(linkreg), make_immed(-lc_maxauto));
    }
    if (mask != 0)
        gen_code(op_movem, 4, make_mask(rmask, 0, 0), push);
    save_mask = mask;
    if (fmask != 0)
        gen_code(op_fmovem, 10, make_mask(frmask, 0, 1), push);
    fsave_mask = fmask;
    if ((prm_phiform || currentfunc->intflag) && currentfunc->tp->lst.head &&
        currentfunc->tp->lst.head != (SYM *)-1) {
        gen_code(op_move, 4, makeareg(0), makeareg(linkreg));
    }
    if ((!prm_linkreg || currentfunc->intflag) && lc_maxauto) {
        AMODE *ap = xalloc(sizeof(AMODE));
        ap->mode = am_indx;
        ap->offset = makenode(en_icon, (char *)-lc_maxauto, 0);
        ap->preg = 7;
        gen_code(op_lea, 0, ap, makeareg(7));
    }
    if (prm_stackcheck) {
        AMODE *ap1;
        ap = set_symbol("_stackerror", 1);
        ap1 = set_symbol("_stackbottom", 0);
        if (prm_rel) {
            ap1->mode = am_indx;
            ap1->preg = basereg;
        }
        else {
            ap1->mode = am_adirect;
            if (prm_smalldata)
                ap1->preg = 2;
            else
                ap1->preg = 4;
        }
        gen_code(op_cmp, 4, ap1, makeareg(7));
        gen_code(op_bhi, 0, ap, 0);
    }
}
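As an aside, the mask/rmask pair built above encodes the same register set twice: mask in natural bit order and rmask bit-reversed within the low 16 bits (presumably because the 68k movem register-list word is reversed for pre-decrement pushes). Below is a minimal standalone sketch of that relationship, using a hypothetical helper name rather than anything from the compiler source:

#include <assert.h>

/* Hypothetical helper mirroring the integer-register mask arithmetic in
   allocate(): register number r (0..15) sets bit r in *mask and the
   mirrored bit (15 - r) in *rmask. */
static void add_int_reg(unsigned *mask, unsigned *rmask, int r)
{
    *mask  |= 1u << r;
    *rmask |= 1u << (15 - r);
}

int main(void)
{
    unsigned mask = 0, rmask = 0;
    add_int_reg(&mask, &rmask, 3);              /* e.g. register D3 */
    assert(mask == 0x0008 && rmask == 0x1000);  /* bit-reversed views */
    return 0;
}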
/* wqthread note: The kernel may create or destroy pthreads in the
   wqthread pool at any time with no userspace interaction,
   and wqthread_start may be entered at any time with no userspace
   interaction.
   To handle this in valgrind, we create and destroy a valgrind
   thread for every work item.
*/
void wqthread_hijack(Addr self, Addr kport, Addr stackaddr, Addr workitem,
                     Int reuse, Addr sp)
{
   ThreadState *tst;
   VexGuestX86State *vex;
   Addr stack;
   SizeT stacksize;
   vki_sigset_t blockall;

   /* When we enter here we hold no lock (!), so we better acquire it
      pronto.  Why do we hold no lock?  Because (presumably) the only way
      to get here is as a result of a SfMayBlock syscall
      "workq_ops(WQOPS_THREAD_RETURN)", which will have dropped the lock.
      At least that's clear for the 'reuse' case.  The non-reuse case?
      Dunno, perhaps it's a new thread the kernel pulled out of a hat.
      In any case we still need to take a lock. */
   VG_(acquire_BigLock_LL)("wqthread_hijack");

   /* Start the thread with all signals blocked.  VG_(scheduler) will
      set the mask correctly when we finally get there. */
   VG_(sigfillset)(&blockall);
   VG_(sigprocmask)(VKI_SIG_SETMASK, &blockall, NULL);

   if (reuse) {

      /* For whatever reason, tst->os_state.pthread appear to have a
         constant offset of 72 on 10.7, but zero on 10.6 and 10.5.  No
         idea why. */
#     if DARWIN_VERS <= DARWIN_10_6
      UWord magic_delta = 0;
#     elif DARWIN_VERS >= DARWIN_10_7
      UWord magic_delta = 0x48;
#     endif

      // This thread already exists; we're merely re-entering
      // after leaving via workq_ops(WQOPS_THREAD_RETURN).
      // Don't allocate any V thread resources.
      // Do reset thread registers.
      ThreadId tid = VG_(lwpid_to_vgtid)(kport);
      vg_assert(VG_(is_valid_tid)(tid));
      vg_assert(mach_thread_self() == kport);

      tst = VG_(get_ThreadState)(tid);

      if (0) VG_(printf)("wqthread_hijack reuse %s: tid %d, tst %p, "
                         "tst->os_state.pthread %#lx, self %#lx\n",
                         tst->os_state.pthread == self ? "SAME" : "DIFF",
                         tid, tst, tst->os_state.pthread, self);

      vex = &tst->arch.vex;
      vg_assert(tst->os_state.pthread - magic_delta == self);
   }
   else {
      // This is a new thread.
      tst = VG_(get_ThreadState)(VG_(alloc_ThreadState)());
      vex = &tst->arch.vex;
      allocstack(tst->tid);
      LibVEX_GuestX86_initialise(vex);

      /* Tell threading tools the new thread exists.  FIXME: we need
         to know the identity (tid) of the parent thread, in order
         that threading tools can make a dependency edge from it to
         this thread.  But we don't know what the parent thread is.
         Hence pass 1 (the root thread).  This is completely wrong in
         general, and could cause large numbers of false races to be
         reported.  In fact, it's positively dangerous; we don't even
         know if thread 1 is still alive, and the thread checkers are
         likely to assert if it isn't. */
      VG_TRACK(pre_thread_ll_create, 1/*BOGUS*/, tst->tid);
   }

   // Set thread's registers
   // Do this FIRST because some code below tries to collect a backtrace,
   // which requires valid register data.
   vex->guest_EIP = wqthread_starter;
   vex->guest_EAX = self;
   vex->guest_EBX = kport;
   vex->guest_ECX = stackaddr;
   vex->guest_EDX = workitem;
   vex->guest_EDI = reuse;
   vex->guest_ESI = 0;
   vex->guest_ESP = sp;

   stacksize = 512*1024;  // wq stacks are always DEFAULT_STACK_SIZE
   stack = VG_PGROUNDUP(sp) - stacksize;

   VG_TRACK(workq_task_start, tst->tid, workitem);

   if (reuse) {
      // Continue V's thread back in the scheduler.
      // The client thread is of course in another location entirely.

      /* Drop the lock before going into
         ML_(wqthread_continue_NORETURN).  The latter will immediately
         attempt to reacquire it in non-LL mode, which is a bit
         wasteful but I don't think is harmful.  A better solution
         would be to not drop the lock but instead "upgrade" it from a
         LL lock to a full lock, but that's too much like hard work
         right now. */
      VG_(release_BigLock_LL)("wqthread_hijack(1)");
      ML_(wqthread_continue_NORETURN)(tst->tid);
   }
   else {
      // Record thread's stack and Mach port and pthread struct
      tst->os_state.pthread = self;
      tst->os_state.lwpid = kport;
      record_named_port(tst->tid, kport, MACH_PORT_RIGHT_SEND, "wqthread-%p");

      // kernel allocated stack - needs mapping
      tst->client_stack_highest_word = stack+stacksize;
      tst->client_stack_szB = stacksize;

      // tell the tool that we are at a point after the new thread
      // has its registers set up (so we can take a stack snapshot),
      // but before it has executed any instructions (or, really,
      // before it has done any memory references).
      VG_TRACK(pre_thread_first_insn, tst->tid);

      // pthread structure
      ML_(notify_core_and_tool_of_mmap)(
            stack+stacksize, pthread_structsize,
            VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
      // stack contents
      // GrP fixme uninitialized!
      ML_(notify_core_and_tool_of_mmap)(
            stack, stacksize,
            VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
      // guard page
      // GrP fixme ban_mem_stack!
      ML_(notify_core_and_tool_of_mmap)(
            stack-VKI_PAGE_SIZE, VKI_PAGE_SIZE,
            0, VKI_MAP_PRIVATE, -1, 0);

      ML_(sync_mappings)("after", "wqthread_hijack", 0);

      // Go!
      /* Same comments as the 'release' in the then-clause.
         start_thread_NORETURN calls run_thread_NORETURN calls
         thread_wrapper which acquires the lock before continuing.
         Let's hope nothing non-thread-local happens until that point.

         DDD: I think this is plain wrong .. if we get to
         thread_wrapper not holding the lock, and someone has recycled
         this thread slot in the meantime, we're hosed.  Is that
         possible, though? */
      VG_(release_BigLock_LL)("wqthread_hijack(2)");
      call_on_new_stack_0_1(tst->os_state.valgrind_stack_init_SP, 0,
                            start_thread_NORETURN, (Word)tst);
   }

   /*NOTREACHED*/
   vg_assert(0);
}
void allocate(int datareg, int addreg, int floatreg, SNODE *block)
/*
 * allocate will allocate registers for the expressions that have
 * a high enough desirability.  It also puts the function
 * header, consisting of saved registers and stack decrements for local
 * variables
 */
{
    CSE *csp;
    ENODE *exptr;
    unsigned mask, rmask, i, fmask, frmask, size;
    AMODE *ap, *ap2;

    mask = asmMask;
    rmask = asmRMask;
    fmask = frmask = 0;
    for (i = cf_freedata; i < datareg; i++)
    {
        rmask = rmask | (1 << (15 - i));
        mask = mask | (1 << i);
    }
    for (i = cf_freeaddress + 16; i < addreg; i++)
    {
        rmask = rmask | (1 << (23 - i));
        mask = mask | (1 << (i - 8));
    }
    while (bsort(&olist))
        ;                       /* sort the expression list */
    csp = olist;
    while (csp != 0)
    {
        if (csp->reg == -1 && !(csp->exp->cflags & DF_VOL) && !csp->voidf)
        {
            if (desire(csp) < 3)
                csp->reg = -1;
            else
            {
                if (csp->exp->nodetype == en_rcon ||
                    csp->exp->nodetype == en_fcon ||
                    csp->exp->nodetype == en_lrcon ||
                    csp->exp->nodetype == en_floatref ||
                    csp->exp->nodetype == en_doubleref ||
                    csp->exp->nodetype == en_longdoubleref ||
                    csp->exp->nodetype == en_fimaginarycon ||
                    csp->exp->nodetype == en_rimaginarycon ||
                    csp->exp->nodetype == en_lrimaginarycon ||
                    csp->exp->nodetype == en_fimaginaryref ||
                    csp->exp->nodetype == en_rimaginaryref ||
                    csp->exp->nodetype == en_lrimaginaryref)
                    {}
                else if ((csp->duses <= csp->uses / 4) &&
                         (datareg < cf_maxdata) && dataregs)
                    csp->reg = (datareg)++;
                else if (!(csp->size == 1 || csp->size == -1 || csp->size == 5) &&
                         (addreg < cf_maxaddress) && addrregs)
                    csp->reg = (addreg)++;
                else if ((datareg < cf_maxdata) && dataregs)
                    csp->reg = (datareg)++;
                if (csp->reg != -1)
//                  if (lvalue(csp->exp))
//                      csp->seg = defseg(csp->exp->v.p[0]) ;
//                  else
                    if (!csp->seg)
                    {
                        csp->seg = defseg(csp->exp);
                        if (csp->seg)
                            csp->reg = -1;
                    }
            }
        }
        if (csp->reg != -1)
        {
            if (lvalue(csp->exp) && !((SYM*)csp->exp->v.p[0]->v.p[0])->funcparm)
            {
                ((SYM*)csp->exp->v.p[0]->v.p[0])->mainsym->inreg = TRUE;
                ((SYM*)csp->exp->v.p[0]->v.p[0])->mainsym->value.i =
                    -csp->reg - (csp->size < 0 ? -csp->size : csp->size) * 256;
            }
            if (csp->reg < 16)
            {
                rmask = rmask | (1 << (15 - csp->reg));
                mask = mask | (1 << csp->reg);
            }
            else if (csp->reg < 32)
            {
                rmask = rmask | (1 << (23 - csp->reg));
                mask = mask | (1 << (csp->reg - 8));
            }
            else
            {
                frmask = frmask | (1 << (39 - csp->reg));
                fmask = fmask | (1 << (csp->reg - 32));
            }
        }
        csp = csp->next;
    }
    allocstack();               /* Allocate stack space for the local vars */
    floatstack_mode = 0;        /* no space for floating point temps */
    if (currentfunc->intflag || currentfunc->faultflag)
    {
        mask = 0;
        rmask = 0;
        if (currentfunc->loadds)
            loadds();
        if (prm_farkeyword)
        {
            GenPush(ES + 24, am_dreg, 0);
            GenPush(FS + 24, am_dreg, 0);
            GenPush(GS + 24, am_dreg, 0);
        }
        gen_code(op_pushad, 0, 0);
    }
    if ((conscount || try_block_list || currentfunc->value.classdata.throwlist
         && currentfunc->value.classdata.throwlist->data) && prm_xcept)
    {
        xceptoffs = lc_maxauto += sizeof(XCEPTDATA);
    }
    if (prm_debug)
    {
        rmask = rmask | (1 << (15 - EBX));
        mask = mask | (1 << EBX);
        rmask = rmask | (1 << (15 - ESI - 4));
        mask = mask | (1 << (ESI + 4));
        rmask = rmask | (1 << (15 - EDI - 4));
        mask = mask | (1 << (EDI + 4));
    }
    if (prm_cplusplus && prm_xcept ||
        (funcfloat || lc_maxauto ||
         currentfunc->tp->lst.head && currentfunc->tp->lst.head != (SYM*)-1) ||
        (currentfunc->value.classdata.cppflags & PF_MEMBER) &&
        !(currentfunc->value.classdata.cppflags & PF_STATIC) ||
        !prm_smartframes || !stackframeoff)
    {
        /* enter is *really* inefficient so we will not use it */
        if (!currentfunc->intflag)
            gen_codes(op_push, 4, makedreg(EBP), 0);
        gen_codes(op_mov, 4, makedreg(EBP), makedreg(ESP));
        if (lc_maxauto)
            gen_code(op_sub, makedreg(ESP), make_immed(lc_maxauto));
        // FIXME ... could auto-alloc an FP value when no frame!
        frame_ins = peep_tail;
    }
    else
        frame_ins = 0;
    if (mask != 0)
        PushRegs(rmask);
    save_mask = mask;
    if (fmask != 0)
        fsave_mask = fmask;
    if (currentfunc->loadds && !currentfunc->intflag)
    {
        loadds();
    }
    if (prm_stackcheck && lc_maxauto)
    {
        AMODE *ap1;
        ap = set_symbol("__stackerror", 1);
        ap1 = set_symbol("__stackbottom", 0);
        ap1->mode = am_direct;
        gen_codes(op_cmp, 4, makedreg(ESP), ap1);
        gen_codes(op_jb, 0, ap, 0);
    }
    AddProfilerData();
    if ((conscount || try_block_list || currentfunc->value.classdata.throwlist
         && currentfunc->value.classdata.throwlist->data) && prm_xcept)
    {
        currentfunc->value.classdata.conslabel = nextlabel++;
        currentfunc->value.classdata.destlabel = nextlabel++;
        gen_codes(op_mov, 4, makedreg(EAX), make_label(nextlabel - 2));
        call_library("__InitExceptBlock");
        gen_label(nextlabel - 1);
    }
}