/* NOTE: every malloc'd storage here should be free'd in a nested call-in environment. * gtmci_isv_restore (in gtmci_isv.c) needs to be reflected for any future mallocs added here. */ void ecode_init(void) { dollar_ecode.begin = (char *)malloc(DOLLAR_ECODE_ALLOC); dollar_ecode.top = dollar_ecode.begin + DOLLAR_ECODE_ALLOC; dollar_ecode.array = (dollar_ecode_struct *)malloc(SIZEOF(dollar_ecode_struct) * DOLLAR_ECODE_MAXINDEX); dollar_ecode.error_rtn_addr = NON_IA64_ONLY(CODE_ADDRESS(ERROR_RTN)) IA64_ONLY(CODE_ADDRESS_C(ERROR_RTN)); dollar_ecode.error_rtn_ctxt = GTM_CONTEXT(ERROR_RTN); dollar_ecode.error_return_addr = (error_ret_fnptr)ERROR_RETURN; dollar_stack.begin = (char *)malloc(DOLLAR_STACK_ALLOC); dollar_stack.top = dollar_stack.begin + DOLLAR_STACK_ALLOC; dollar_stack.array = (dollar_stack_struct *)malloc(SIZEOF(dollar_stack_struct) * DOLLAR_STACK_MAXINDEX); NULLIFY_DOLLAR_ECODE; /* this macro resets dollar_ecode.{end,index}, dollar_stack.{begin,index,incomplete} and * first_ecode_error_frame to point as if no error occurred at all */ }
/* Atomically add "val" to the integer at "addr" using a compare-and-swap loop,
 * returning the new (post-add) value. On repeated CAS failure the routine backs
 * off (rel_quant / wcs_sleep) and retries up to LOCK_TRIES times before raising
 * a DBCCERR error. The "latch" parameter is unused in the visible body —
 * presumably kept for signature compatibility with other lock primitives;
 * NOTE(review): confirm against callers before removing.
 */
int4 add_inter(int val, sm_int_ptr_t addr, sm_global_latch_ptr_t latch)
{
	int4			cntrval, newcntrval, spins, maxspins, retries;
	boolean_t		cswpsuccess;
	sm_int_ptr_t volatile	cntrval_p;

	/* Bump fast_lock_count for the duration of the operation; every exit path
	 * below must decrement it again.
	 */
	++fast_lock_count;
	/* On a uniprocessor spinning is pointless, so allow only a single attempt
	 * per outer retry; otherwise scale the spin count by processor count.
	 */
	maxspins = num_additional_processors ? MAX_LOCK_SPINS(LOCK_SPINS, num_additional_processors) : 1;
	cntrval_p = addr; /* Need volatile context especially on Itanium */
	for (retries = LOCK_TRIES - 1; 0 < retries; retries--) /* - 1 so do rel_quant 3 times first */
	{	/* seems like a legitimate spin which could take advantage of transactional memory */
		for (spins = maxspins; 0 < spins; spins--)
		{
			/* Snapshot the current counter and compute the target value */
			cntrval = *cntrval_p;
			newcntrval = cntrval + val;
			/* This is (currently as of 08/2007) the only non-locking usage of compswap in GT.M.
			 * We are not passing compswap an actual sm_global_latch_ptr_t addr like its function
			 * would normally dictate. However, since the address of the field we want to deal
			 * with is the first int in the global_latch_t, we just pass our int address properly
			 * cast to the type that compswap is expecting. The assert below verifies that this
			 * assumption has not changed (SE 08/2007).
			 */
			assert(0 == OFFSETOF(global_latch_t, u.parts.latch_pid));
			IA64_ONLY(cswpsuccess = compswap_unlock(RECAST(sm_global_latch_ptr_t)cntrval_p, cntrval, newcntrval));
			NON_IA64_ONLY(cswpsuccess = compswap((sm_global_latch_ptr_t)cntrval_p, cntrval, newcntrval));
			if (cswpsuccess)
			{	/* CAS took effect: undo the fast_lock_count bump and return the new value */
				--fast_lock_count;
				assert(0 <= fast_lock_count);
				return newcntrval;
			}
		}
		if (retries & 0x3)
			/* On all but every 4th pass, do a simple rel_quant */
			rel_quant();	/* Release processor to holder of lock (hopefully) */
		else
		{	/* On every 4th pass, we bide for awhile */
			wcs_sleep(LOCK_SLEEP);
			assert(0 == (LOCK_TRIES % 4)); /* assures there are 3 rel_quants prior to first wcs_sleep() */
		}
	}
	/* Exhausted all retries without a successful CAS — should not happen in practice */
	--fast_lock_count;
	assert(FALSE);
	rts_error_csa(CSA_ARG(NULL) VARLSTCNT(9) ERR_DBCCERR, 2, LEN_AND_LIT("*unknown*"), ERR_ERRCALL, 3, CALLFROM);
	return 0; /* To keep the compiler quiet */
}