/*
 * Inflates the compressed lockword into a fat monitor.
 *
 * Transfers ownership and the recursion count of the thin lock held by the
 * current thread into a newly created fat monitor, then publishes the fat
 * monitor's id in the lockword.  Returns the fat monitor, or NULL if monitor
 * creation/entry fails.
 */
hythread_monitor_t VMCALL hythread_inflate_lock(hythread_thin_monitor_t *lockword_ptr) {
    hythread_monitor_t fat_monitor;
    IDATA status;
    IDATA fat_monitor_id;
    U_32 lockword;
    int i;
    // we don't need to write lock on lock_table during all this function because
    // the only invariant we need is 'fat lock is not in the fat lock table before we put it'
    // however this invariant is true because we hold monitor->mutex during this function
    // so it cannot be called twice for the signle monitor concurrently
    lockword = *lockword_ptr;
    // Already inflated by someone else: just look up the existing fat monitor.
    if (IS_FAT_LOCK (lockword)) {
        return locktable_get_fat_monitor(FAT_LOCK_ID(lockword));
    }
#ifdef LOCK_RESERVATION
    // unreserve lock first
    if (IS_RESERVED(lockword)) {
        unreserve_self_lock(lockword_ptr);
        lockword = *lockword_ptr;
    }
    assert(!IS_RESERVED(lockword));
#endif
    // Caller must own the thin lock and must be in suspend-disabled state.
    assert(hythread_owns_thin_lock(tm_self_tls, lockword));
    assert(!hythread_is_suspend_enabled());
    CTRACE(("inflation begin for %x thread: %d", lockword, tm_self_tls->thread_id));
    status = hythread_monitor_init(&fat_monitor, 0); // allocate fat fat_monitor
    //assert(status == TM_ERROR_NONE);
    if (status != TM_ERROR_NONE) {
        return NULL;
    }
    // Take the fat monitor once for the base acquisition...
    status = hythread_monitor_enter(fat_monitor);
    if (status != TM_ERROR_NONE) {
        return NULL;
    }
    // ...then re-enter once per thin-lock recursion level.
    for (i = RECURSION(lockword); i > 0; i--) {
        CTRACE(("inflate recursion monitor"));
        status = hythread_monitor_enter(fat_monitor); // transfer recursion count to fat fat_monitor
        assert(status == TM_ERROR_NONE);
    }
    fat_monitor_id = locktable_put_fat_monitor(fat_monitor); // put fat_monitor into lock table
    // Publish the fat lock id in the lockword; from this point other threads
    // observe the lock as inflated.
    set_fat_lock_id(lockword_ptr, fat_monitor_id);
    CTRACE(("hythread_inflate_lock %d thread: %d\n", FAT_LOCK_ID(*lockword_ptr), tm_self_tls->thread_id));
    //assert(FAT_LOCK_ID(*lockword_ptr) != 2);
    CTRACE(("FAT ID : 0x%x", *lockword_ptr));
#ifdef LOCK_RESERVATION
    assert(!IS_RESERVED(*lockword_ptr));
#endif
    return fat_monitor;
}
/**
 * Unlocks thin monitor.
 *
 * If the current thread owns the thin lock, either decrements the recursion
 * count or releases the lock by clearing the owner-id halfword.  If the lock
 * has been inflated, delegates to the fat monitor exit.
 *
 * @param[in] lockword_ptr monitor addr
 * @return TM_ERROR_NONE on success, TM_ERROR_ILLEGAL_STATE if the calling
 *         thread does not own the monitor
 */
IDATA VMCALL hythread_thin_monitor_exit(hythread_thin_monitor_t *lockword_ptr) {
    U_32 lockword = *lockword_ptr;
    hythread_monitor_t fat_monitor;
    IDATA this_id = tm_self_tls->thread_id; // obtain current thread id
    assert(this_id > 0 && this_id < 0xffff);
    assert(!hythread_is_suspend_enabled());
    if (THREAD_ID(lockword) == this_id) {
        if (RECURSION(lockword)==0) {
#ifdef LOCK_RESERVATION
            // NOTE(review): a reserved lockword with zero recursion means the
            // lock is merely reserved for this thread, not actually held, so
            // exiting it is an error — confirm against IS_RESERVED semantics.
            if (IS_RESERVED(lockword)) {
                CTRACE(("ILLEGAL_STATE %x\n", lockword));
                return TM_ERROR_ILLEGAL_STATE;
            }
#endif
            // Release: keep only the low 16 bits (drops owner id / recursion).
            *lockword_ptr = lockword & 0xffff;
        } else {
            // Nested exit: just drop one recursion level.
            RECURSION_DEC(lockword_ptr, lockword);
            //CTRACE(("recursion_dec: 0x%x", *lockword_ptr));
        }
        //CTRACE(("unlocked: 0x%x id: %d\n", *lockword_ptr, THREAD_ID(*lockword_ptr)));
        //hythread_safe_point();
        return TM_ERROR_NONE;
    } else if (IS_FAT_LOCK(lockword)) {
        CTRACE(("exit fat monitor %d thread: %d\n", FAT_LOCK_ID(lockword), tm_self_tls->thread_id));
        fat_monitor = locktable_get_fat_monitor(FAT_LOCK_ID(lockword)); // find fat_monitor
        return hythread_monitor_exit(fat_monitor); // unlock fat_monitor
    }
    // Neither owned by this thread nor inflated: caller does not hold it.
    CTRACE(("ILLEGAL_STATE %d\n", FAT_LOCK_ID(lockword)));
    return TM_ERROR_ILLEGAL_STATE;
}
/*
 * Reports whether the given thread owns the (non-fat) thin lock.
 *
 * With LOCK_RESERVATION enabled, a lockword that carries the thread's id but
 * is only reserved (reserved flag set, recursion zero) does not count as
 * owned.
 */
IDATA VMCALL hythread_owns_thin_lock(hythread_t thread, hythread_thin_monitor_t lockword) {
    IDATA owner_id = thread->thread_id;
    assert(!IS_FAT_LOCK(lockword));
#ifdef LOCK_RESERVATION
    if (THREAD_ID(lockword) != owner_id) {
        return 0;
    }
    /* Reserved-but-never-entered locks are not held yet. */
    return !IS_RESERVED(lockword) || RECURSION(lockword) != 0;
#else
    return THREAD_ID(lockword) == owner_id;
#endif
}
/*
 * Stores a fat monitor id into the lockword and marks it as inflated.
 *
 * Keeps the low 11 bits of the existing lockword, places the monitor id in
 * bits 11..30, and sets bit 31 (the fat-lock flag).  A read/write barrier
 * publishes the new lockword to other threads.
 */
void set_fat_lock_id(hythread_thin_monitor_t *lockword_ptr, IDATA monitor_id) {
    U_32 word = *lockword_ptr;
#ifdef LOCK_RESERVATION
    assert(!IS_RESERVED(word));
#endif
    assert((U_32)monitor_id < lock_table->size);
    word = (word & 0x7FF) | (monitor_id << 11) | 0x80000000;
    *lockword_ptr = word;
    port_rw_barrier();
}
/*
 * Unreserves the lock already owned by this thread.
 *
 * Converts the calling thread's reserved thin lock into an ordinary
 * (unreserved) thin lock.  Must only be called by the reserving thread
 * itself (asserted below).
 */
void unreserve_self_lock(hythread_thin_monitor_t *lockword_ptr) {
    U_32 lockword = *lockword_ptr;
    U_32 lockword_new;
    CTRACE(("unreserve self_id %d lock owner %d", hythread_get_self_id(), THREAD_ID(lockword)));
    assert(hythread_get_self_id() == THREAD_ID(lockword));
    assert (!IS_FAT_LOCK(*lockword_ptr));
    assert (IS_RESERVED(lockword));
    CTRACE(("Unreserved self %d \n", ++unreserve_count_self/*, vm_get_object_class_name(lockword_ptr-1)*/));
    // Set reservation bit to 1 and reduce recursion count
    // NOTE(review): OR-ing RESERVED_BITMASK here, followed by the
    // assert(!IS_RESERVED(...)) at the end, implies a set bit means
    // "NOT reserved" (i.e. IS_RESERVED tests for the bit being clear) —
    // confirm against the IS_RESERVED macro definition.
    lockword_new = (lockword | RESERVED_BITMASK);
    if (RECURSION(lockword_new) != 0) {
        // Reserved locks pre-count one acquisition: drop one recursion level
        // while writing the unreserved lockword back.
        RECURSION_DEC(lockword_ptr, lockword_new);
    } else {
        // No recursion: clear the whole upper halfword (owner id etc.).
        lockword_new = lockword_new & 0x0000ffff;
        *lockword_ptr = lockword_new;
    }
    assert(!IS_RESERVED(*lockword_ptr));
    CTRACE(("unreserved self"));
}
/**
 * Set up a DMA transfer.
 * @param direction the direction of the transfer (DMA_READ or DMA_WRITE)
 * @param chan the channel
 * @param addr the address of the buffer
 * @param size number of bytes to transfer
 *
 * NOTE(review): this definition appears truncated at the end of this chunk —
 * the count-register writes and the closing brace are not visible here.
 */
void Setup_DMA(enum DMA_Direction direction, int chan, void *addr_, ulong_t size)
{
    uchar_t mode = 0;
    ulong_t addr = (ulong_t) addr_;

    /* Make sure parameters are sensible */
    KASSERT(direction == DMA_READ || direction == DMA_WRITE);
    KASSERT(VALID_CHANNEL(chan));
    KASSERT(IS_RESERVED(chan));
    KASSERT(VALID_MEM(addr, size));
    KASSERT(size > 0);

    /* elaborate because the otherwise working test wouldn't work
       if the DMA region was precisely 64K page aligned. */
    KASSERT0((((addr & 0xffff) == 0 && size <= 65536)) ||
             (size <= (0xffff - (addr & 0xffff))),
             "DMA region can't cross 64K boundary");

    /* Set up transfer mode */
    mode |= DMA_MODE_SINGLE;
    mode |= (direction == DMA_READ) ? DMA_MODE_READ : DMA_MODE_WRITE;
    mode |= (chan & 3);
    /* NOTE(review): channel 5 gets an extra mode bit (0x10) — presumably an
       auto-init or controller-2 quirk; confirm against the controller spec. */
    if (chan == 5)
        mode |= 0x10;

    /* nspring testing, make this better if useful. */
    Debug("Setup_DMA(%s,%d,%x,%d)\n",
          direction == DMA_READ ? "DMA_READ" : "DMA_WRITE",
          chan, addr, size);
    Debug("Setup_DMA: mode=%02x\n", mode);
    Debug("DMA_ADDR_REG for channel is %02x\n", DMA_ADDR_REG(chan));
    Debug("DMA_PAGE_REG for channel is %02x\n", DMA_PAGE_REG(chan));
    Debug("DMA_COUNT_REG for channel is %02x\n", DMA_COUNT_REG(chan));

    /* Temporarily mask the DMA channel */
    Mask_DMA(chan);

    /* Write the transfer mode */
    Out_Byte(DMA_MODE_REG(chan), mode);

    /* Clear the byte pointer flip-flop */
    Out_Byte(DMA_CLEAR_FF_REG(chan), 0); /* doesn't matter what value is written here */

    /* Write the transfer address (LSB, then MSB) */
    Out_Byte(DMA_ADDR_REG(chan), addr & 0xFF);
    Out_Byte(DMA_ADDR_REG(chan), (addr >> 8) & 0xFF);

    /* Write the page register */
    Out_Byte(DMA_PAGE_REG(chan), (addr >> 16) & 0xFF);

    /*
     * Write the count (LSB, then MSB)
     * Note that the count is one less that the number of bytes transferred
     */
    /* Channels above 4 transfer 16-bit words, so halve the byte count. */
    if (chan > 4) {
        size >>= 1;
    } /* words not bytes? */
/** * Reserve given DMA channel. * @param chan the channel to reserve * @return true if successful, false if not */ bool Reserve_DMA(int chan) { bool iflag = Begin_Int_Atomic(); bool result = false; KASSERT(VALID_CHANNEL(chan)); if (!IS_RESERVED(chan)) { /* Channel is available; unmask it. */ Out_Byte(DMA_MASK_ONE_REG(chan), chan & 3); /* Mask channel as allocated */ s_allocated |= (1 << chan); result = true; } End_Int_Atomic(iflag); return result; }
/**
 * Returns the recursion count of the given monitor.
 *
 * The count includes the initial acquisition, i.e. a monitor entered once
 * (non-recursively) reports 1, and an unowned monitor reports 0.
 *
 * @param[in] lockword_ptr monitor addr
 */
IDATA VMCALL hythread_thin_monitor_get_recursion(hythread_thin_monitor_t *lockword_ptr) {
    U_32 word;
    hythread_monitor_t monitor;

    assert(lockword_ptr);
    word = *lockword_ptr;

    if (IS_FAT_LOCK(word)) {
        /* Inflated: the fat monitor's count excludes the first enter. */
        monitor = locktable_get_fat_monitor(FAT_LOCK_ID(word));
        return monitor->recursion_count + 1;
    }
    if (THREAD_ID(word) == 0) {
        /* Nobody holds the thin lock. */
        return 0;
    }
#ifdef LOCK_RESERVATION
    if (IS_RESERVED(word)) {
        /* Reserved lockwords already count the first acquisition. */
        return RECURSION(word);
    }
#endif
    return RECURSION(word) + 1;
}
/**
 * Returns the owner of the given thin monitor.
 *
 * Returns NULL when the monitor is not held by any thread.
 *
 * @param[in] lockword_ptr monitor addr
 */
hythread_t VMCALL hythread_thin_monitor_get_owner(hythread_thin_monitor_t *lockword_ptr) {
    U_32 word;
    hythread_monitor_t monitor;

    assert(lockword_ptr);
    word = *lockword_ptr;

    if (IS_FAT_LOCK(word)) {
        /* Inflated: ownership lives in the fat monitor. */
        monitor = locktable_get_fat_monitor(FAT_LOCK_ID(word));
        return monitor->owner;
    }
    if (THREAD_ID(word) == 0) {
        /* Thin lock is free. */
        return NULL;
    }
#ifdef LOCK_RESERVATION
    if (RECURSION(word) == 0 && IS_RESERVED(word)) {
        /* Reserved but never actually entered: no owner yet. */
        return NULL;
    }
#endif
    return hythread_get_thread(THREAD_ID(word));
}
/*
 * Percent-encode one byte into ret at offset len ("%XY", uppercase hex).
 * The caller must already have reserved at least 3 bytes of room.
 * Returns the new length.
 */
static int
bigobject_uri_escape_byte(char *ret, int len, unsigned char val)
{
    int hi = val / 0x10;
    int lo = val % 0x10;

    ret[len++] = '%';
    ret[len++] = hi + (hi > 9 ? 'A' - 10 : '0');
    ret[len++] = lo + (lo > 9 ? 'A' - 10 : '0');
    return len;
}

/**
 * bigobject_uri_to_string:
 * @uri: pointer to an bURI
 *
 * Save the bURI as an escaped string
 *
 * Returns a new string (to be deallocated by caller), or NULL on
 * allocation failure or if @uri is NULL.
 */
char *
bigobject_uri_to_string(bURI *uri)
{
    char *ret = NULL;
    char *temp;
    const char *p;
    int len;
    int max;

    if (uri == NULL)
        return NULL;

    max = 80;
    ret = malloc(max + 1);
    /* BUG FIX: the initial allocation was previously used unchecked. */
    if (ret == NULL)
        return NULL;
    len = 0;

    if (uri->scheme != NULL) {
        /* Scheme is copied verbatim, followed by ':'. */
        p = uri->scheme;
        while (*p != 0) {
            if (len >= max) {
                temp = realloc2n(ret, &max);
                if (temp == NULL) goto mem_error;
                ret = temp;
            }
            ret[len++] = *p++;
        }
        if (len >= max) {
            temp = realloc2n(ret, &max);
            if (temp == NULL) goto mem_error;
            ret = temp;
        }
        ret[len++] = ':';
    }
    if (uri->opaque != NULL) {
        /* Opaque part: reserved and unreserved chars pass; escape the rest. */
        p = uri->opaque;
        while (*p != 0) {
            if (len + 3 >= max) {
                temp = realloc2n(ret, &max);
                if (temp == NULL) goto mem_error;
                ret = temp;
            }
            if (IS_RESERVED(*(p)) || IS_UNRESERVED(*(p)))
                ret[len++] = *p++;
            else
                len = bigobject_uri_escape_byte(ret, len, *(unsigned char *)p++);
        }
    } else {
        if (uri->server != NULL) {
            if (len + 3 >= max) {
                temp = realloc2n(ret, &max);
                if (temp == NULL) goto mem_error;
                ret = temp;
            }
            ret[len++] = '/';
            ret[len++] = '/';
            if (uri->user != NULL) {
                /* userinfo: unreserved plus ";:&=+$," pass; escape the rest. */
                p = uri->user;
                while (*p != 0) {
                    if (len + 3 >= max) {
                        temp = realloc2n(ret, &max);
                        if (temp == NULL) goto mem_error;
                        ret = temp;
                    }
                    if ((IS_UNRESERVED(*(p))) ||
                        ((*(p) == ';')) || ((*(p) == ':')) ||
                        ((*(p) == '&')) || ((*(p) == '=')) ||
                        ((*(p) == '+')) || ((*(p) == '$')) ||
                        ((*(p) == ',')))
                        ret[len++] = *p++;
                    else
                        len = bigobject_uri_escape_byte(ret, len, *(unsigned char *)p++);
                }
                if (len + 3 >= max) {
                    temp = realloc2n(ret, &max);
                    if (temp == NULL) goto mem_error;
                    ret = temp;
                }
                ret[len++] = '@';
            }
            /* Host is copied verbatim. */
            p = uri->server;
            while (*p != 0) {
                if (len >= max) {
                    temp = realloc2n(ret, &max);
                    if (temp == NULL) goto mem_error;
                    ret = temp;
                }
                ret[len++] = *p++;
            }
            if (uri->port > 0) {
                if (len + 10 >= max) {
                    temp = realloc2n(ret, &max);
                    if (temp == NULL) goto mem_error;
                    ret = temp;
                }
                len += snprintf(&ret[len], max - len, ":%d", uri->port);
            }
        } else if (uri->authority != NULL) {
            if (len + 3 >= max) {
                temp = realloc2n(ret, &max);
                if (temp == NULL) goto mem_error;
                ret = temp;
            }
            ret[len++] = '/';
            ret[len++] = '/';
            /* authority: unreserved plus "$,;:@&=+" pass; escape the rest. */
            p = uri->authority;
            while (*p != 0) {
                if (len + 3 >= max) {
                    temp = realloc2n(ret, &max);
                    if (temp == NULL) goto mem_error;
                    ret = temp;
                }
                if ((IS_UNRESERVED(*(p))) ||
                    ((*(p) == '$')) || ((*(p) == ',')) ||
                    ((*(p) == ';')) || ((*(p) == ':')) ||
                    ((*(p) == '@')) || ((*(p) == '&')) ||
                    ((*(p) == '=')) || ((*(p) == '+')))
                    ret[len++] = *p++;
                else
                    len = bigobject_uri_escape_byte(ret, len, *(unsigned char *)p++);
            }
        } else if (uri->scheme != NULL) {
            if (len + 3 >= max) {
                temp = realloc2n(ret, &max);
                if (temp == NULL) goto mem_error;
                ret = temp;
            }
            ret[len++] = '/';
            ret[len++] = '/';
        }
        if (uri->path != NULL) {
            p = uri->path;
            /*
             * the colon in file:///d: should not be escaped or
             * Windows accesses fail later.
             */
            if ((uri->scheme != NULL) &&
                (p[0] == '/') &&
                (((p[1] >= 'a') && (p[1] <= 'z')) ||
                 ((p[1] >= 'A') && (p[1] <= 'Z'))) &&
                (p[2] == ':') &&
                (!strcmp(uri->scheme, "file"))) {
                if (len + 3 >= max) {
                    temp = realloc2n(ret, &max);
                    if (temp == NULL) goto mem_error;
                    ret = temp;
                }
                /* Copy "/X:" verbatim. */
                ret[len++] = *p++;
                ret[len++] = *p++;
                ret[len++] = *p++;
            }
            /* path: unreserved plus "/;@&=+$," pass; escape the rest. */
            while (*p != 0) {
                if (len + 3 >= max) {
                    temp = realloc2n(ret, &max);
                    if (temp == NULL) goto mem_error;
                    ret = temp;
                }
                if ((IS_UNRESERVED(*(p))) || ((*(p) == '/')) ||
                    ((*(p) == ';')) || ((*(p) == '@')) ||
                    ((*(p) == '&')) || ((*(p) == '=')) ||
                    ((*(p) == '+')) || ((*(p) == '$')) ||
                    ((*(p) == ',')))
                    ret[len++] = *p++;
                else
                    len = bigobject_uri_escape_byte(ret, len, *(unsigned char *)p++);
            }
        }
        if (uri->query != NULL) {
            /* Query is copied verbatim (assumed pre-escaped by the caller). */
            if (len + 1 >= max) {
                temp = realloc2n(ret, &max);
                if (temp == NULL) goto mem_error;
                ret = temp;
            }
            ret[len++] = '?';
            p = uri->query;
            while (*p != 0) {
                if (len + 1 >= max) {
                    temp = realloc2n(ret, &max);
                    if (temp == NULL) goto mem_error;
                    ret = temp;
                }
                ret[len++] = *p++;
            }
        }
    }
    if (uri->fragment != NULL) {
        if (len + 3 >= max) {
            temp = realloc2n(ret, &max);
            if (temp == NULL) goto mem_error;
            ret = temp;
        }
        ret[len++] = '#';
        /* fragment: reserved and unreserved chars pass; escape the rest. */
        p = uri->fragment;
        while (*p != 0) {
            if (len + 3 >= max) {
                temp = realloc2n(ret, &max);
                if (temp == NULL) goto mem_error;
                ret = temp;
            }
            if ((IS_UNRESERVED(*(p))) || (IS_RESERVED(*(p))))
                ret[len++] = *p++;
            else
                len = bigobject_uri_escape_byte(ret, len, *(unsigned char *)p++);
        }
    }
    if (len >= max) {
        temp = realloc2n(ret, &max);
        if (temp == NULL) goto mem_error;
        ret = temp;
    }
    ret[len] = 0;
    return ret;

mem_error:
    FREE(ret);
    return NULL;
}
/* Stores an escaped value into an attribute. Determines type of attribute at
 * the same time.
 *
 * tag must be null terminated.
 * val must be of length len.
 * policy will only be respected where it can be (ints, strings, and opaques).
 *
 * the contents of tag are NOT verified.
 *
 * Returns:
 *  SLP_PARAMETER_BAD - Syntax error in the value.
 *  SLP_MEMORY_ALLOC_FAILED
 */
SLPError SLPAttrStore(struct xx_SLPAttributes *slp_attr, const char *tag,
        const char *val, size_t len, SLPInsertionPolicy policy)
{
    size_t i; /* Index into val. BUG FIX: was int — signed/unsigned mismatch with len. */
    SLPBoolean is_str; /* Flag used for checking if given is string. */
    char *unescaped;
    size_t unescaped_len; /* Length of the unescaped text. */

    /***** Check opaque. *****/
    if (strncmp(val, OPAQUE_PREFIX, OPAQUE_PREFIX_LEN) == 0) {
        /*** Verify length (ie, that it is the multiple of the size of an
         * escaped character). ***/
        if (len % ESCAPED_LEN != 0) {
            return SLP_PARAMETER_BAD;
        }
        unescaped_len = (len / ESCAPED_LEN) - 1; /* -1 to drop the OPAQUE_PREFIX. */

        /*** Verify that every character has been escaped. ***/
        /* TODO */

        /***** Unescape the value. *****/
        unescaped = (char *)malloc(unescaped_len);
        if (unescaped == NULL) {
            return SLP_MEMORY_ALLOC_FAILED; /* FIXME: Real error code. */
        }
        if (unescape_into(unescaped, (char *)(val + OPAQUE_PREFIX_LEN),
                len - OPAQUE_PREFIX_LEN) != NULL) {
            SLPError err;
            err = SLPAttrSet_opaque((SLPAttributes)slp_attr, tag, unescaped,
                    (len - OPAQUE_PREFIX_LEN) / 3, policy);
            free(unescaped); /* FIXME This should be put into the val, and free()'d in val_destroy(). */
            return err;
        }
        free(unescaped); /* BUG FIX: was leaked on the unescape failure path. */
        return SLP_PARAMETER_BAD; /* FIXME Verify. Is this really a bad parameter? */
    }

    /***** Check boolean. *****/
    if ((BOOL_TRUE_STR_LEN == len) && (strncmp(val, BOOL_TRUE_STR, len) == 0)) {
        return SLPAttrSet_bool((SLPAttributes)slp_attr, tag, SLP_TRUE);
    }
    if ((BOOL_FALSE_STR_LEN == len) && strncmp(val, BOOL_FALSE_STR, len) == 0) {
        return SLPAttrSet_bool((SLPAttributes)slp_attr, tag, SLP_FALSE);
    }

    /***** Check integer *****/
    if (*val == '-' || isdigit((int)*val)) {
        /*** Verify. ***/
        SLPBoolean is_int = SLP_TRUE; /* Flag true if the attr is an int. */
        for (i = 1; i < len; i++) {
            /* We start at 1 since first char has already been checked. */
            if (!isdigit((int)val[i])) {
                is_int = SLP_FALSE;
                break;
            }
        }

        /*** Handle the int-ness. ***/
        if (is_int == SLP_TRUE) {
            char *end; /* To verify that the correct length was read. */
            SLPError err;
            err = SLPAttrSet_int((SLPAttributes)slp_attr, tag,
                    strtol(val, &end, 10), policy);
            assert(end == val + len);
            return err;
        }
    }

    /***** Check string. *****/
    is_str = SLP_TRUE;
    for (i = 0; i < len; i++) {
        if (IS_RESERVED(val[i]) && (val[i] != '\\')) {
            is_str = SLP_FALSE;
            break;
        }
    }

    if (is_str == SLP_TRUE) {
        unescaped_len = find_unescaped_size(val, len);
        unescaped = (char *)malloc(unescaped_len + 1);
        /* BUG FIX: allocation result was previously used unchecked. */
        if (unescaped == NULL) {
            return SLP_MEMORY_ALLOC_FAILED;
        }
        if (unescape_into(unescaped, (char *)val, len) != NULL) {
            SLPError err;
            unescaped[unescaped_len] = '\0';
            err = SLPAttrSet_str((SLPAttributes)slp_attr, tag, unescaped, policy);
            free(unescaped); /* FIXME This should be put into the val, and free()'d in val_destroy(). */
            return err;
        }
        free(unescaped); /* BUG FIX: was leaked on the unescape failure path. */
        return SLP_PARAMETER_BAD;
    }

    /* We don't bother checking for a keyword attribute since it can't have a
     * value. */
    return SLP_PARAMETER_BAD; /* Could not determine type. */
}
/**
 * Attempts to lock thin monitor.
 * If the monitor is already locked, this call returns immediately with TM_BUSY.
 *
 * Fast paths: recursive acquisition by the current owner, and a CAS on the
 * owner-id halfword when the monitor is free.  Otherwise the monitor is
 * either fat (delegate to the fat monitor), reserved by another thread
 * (unreserve and retry), or busy (optionally spin, then report EBUSY).
 *
 * @param[in] lockword_ptr monitor addr
 */
IDATA hythread_thin_monitor_try_enter(hythread_thin_monitor_t *lockword_ptr) {
    U_32 lockword;
    // workaround strange intel compiler bug
#if defined (__INTEL_COMPILER) && defined (LINUX)
    volatile
#endif
    IDATA this_id = tm_self_tls->thread_id;
    IDATA lock_id;
    IDATA status;
    hythread_monitor_t fat_monitor;
    int UNUSED i;
    assert(!hythread_is_suspend_enabled());
    assert((UDATA)lockword_ptr > 4);
    assert(tm_self_tls);

    // By DRLVM design rules lockword (see description in thin locks paper)
    // is only modified without compare-and-exchange by owner thread. If tools
    // like Intel Thread Checker find a bug about this line, it may actually be a
    // false-positive.
    lockword = *lockword_ptr;
    lock_id = THREAD_ID(lockword);
    //CTRACE(("try lock %x %d", this_id, RECURSION(lockword)));

    // Check if the lock is already reserved or owned by this thread
    if (lock_id == this_id) {
        if (RECURSION(lockword) == MAX_RECURSION) {
            //inflate lock in case of recursion overflow
            fat_monitor = hythread_inflate_lock(lockword_ptr);
            if (fat_monitor == NULL) {
                return TM_ERROR_OUT_OF_MEMORY;
            }
            return hythread_monitor_try_enter(fat_monitor);
            //break FAT_LOCK;
        } else {
            CTRACE(("try lock %x count:%d", this_id, res_lock_count++));
            // increase recursion
            RECURSION_INC(lockword_ptr, lockword);
            return TM_ERROR_NONE;
        }
    }

    // Fast path didn't work, someone else is holding the monitor (or it isn't reserved yet):
    // DO SPIN FOR A WHILE, this will decrease the number of fat locks.
#ifdef SPIN_COUNT
    // Reload the lockword and owner id on every spin iteration.
    for (i = SPIN_COUNT; i >= 0; i--, lockword = *lockword_ptr, lock_id = THREAD_ID(lockword)) {
#endif
        // Check if monitor is free and thin
        if (lock_id == 0) {
            // Monitor is free
            assert( RECURSION(lockword) < 1);
            assert(this_id > 0 && this_id < 0x8000);
            // Acquire monitor: CAS our id into the owner halfword
            // (the +1 points the 16-bit CAS at the owner-id half of the word).
            if (0 != port_atomic_cas16 (((volatile apr_uint16_t*) lockword_ptr)+1,
                                        (apr_uint16_t) this_id, 0)) {
#ifdef SPIN_COUNT
                continue;
#else
                return TM_ERROR_EBUSY;
#endif
            }
#ifdef LOCK_RESERVATION
            //lockword = *lockword_ptr; // this reloading of lockword may be odd, need to investigate;
            if (IS_RESERVED(lockword)) {
                CTRACE(("initially reserve lock %x count: %d ", *lockword_ptr, init_reserve_cout++));
                RECURSION_INC(lockword_ptr, *lockword_ptr);
            }
#endif
            CTRACE(("CAS lock %x count: %d ", *lockword_ptr, cas_cout++));
            return TM_ERROR_NONE;
        } else
        // Fat monitor
        if (IS_FAT_LOCK(lockword)) {
            CTRACE(("FAT MONITOR %d \n", ++fat_lock2_count/*, vm_get_object_class_name(lockword_ptr-1)*/));
            fat_monitor = locktable_get_fat_monitor(FAT_LOCK_ID(lockword)); // find fat_monitor in lock table
            status = hythread_monitor_try_enter(fat_monitor);
#ifdef SPIN_COUNT
            if (status == TM_ERROR_EBUSY) {
                continue;
            }
#endif
            return status;
        }
#ifdef LOCK_RESERVATION
        // unreserved busy lock
        else if (IS_RESERVED(lockword)) {
            // Break another thread's reservation, then retry from the top.
            status = hythread_unreserve_lock(lockword_ptr);
            if (status != TM_ERROR_NONE) {
#ifdef SPIN_COUNT
                if (status == TM_ERROR_EBUSY) {
                    continue;
                }
#endif //SPIN_COUNT
                return status;
            }
            return hythread_thin_monitor_try_enter(lockword_ptr);
        }
#endif
#ifdef SPIN_COUNT
        hythread_yield();
    }
#endif
    return TM_ERROR_EBUSY;
}
/**
 * Breaks another thread's lock reservation.
 *
 * Suspends the reserving owner (if any and if it is running), CAS-loops the
 * lockword into its unreserved form, then resumes the owner.  With soft
 * unreservation enabled, a running owner keeps a flag (via `append`) so the
 * lock state reflects the soft-unreserve protocol; otherwise the hard path
 * is taken.  (Original header comment was uninformative; semantics above are
 * read from the code — confirm against the thin-locks design doc.)
 */
IDATA VMCALL hythread_unreserve_lock(hythread_thin_monitor_t *lockword_ptr) {
    U_32 lockword = *lockword_ptr;
    U_32 lockword_new;
    uint16 lock_id;
    hythread_t owner;
    IDATA status;
    I_32 append;

    // trylock used to prevent cyclic suspend deadlock
    // the java_monitor_enter calls safe_point between attempts.
    /*status = port_mutex_trylock(&TM_LOCK);
    if (status !=TM_ERROR_NONE) {
        return status;
    }*/

    // Already inflated: nothing to unreserve.
    if (IS_FAT_LOCK(lockword)) {
        return TM_ERROR_NONE;
    }
    lock_id = THREAD_ID(lockword);
    owner = hythread_get_thread(lock_id);
    CTRACE(("Unreserved other %d \n", ++unreserve_count/*, vm_get_object_class_name(lockword_ptr-1)*/));
    // Someone else may have unreserved or inflated it since we loaded lockword.
    if (!IS_RESERVED(lockword) || IS_FAT_LOCK(lockword)) {
        // port_mutex_unlock(&TM_LOCK);
        return TM_ERROR_NONE;
    }
    // suspend owner
    if (owner) {
        assert(owner);
        assert(hythread_get_id(owner) == lock_id);
        assert(owner != hythread_self());
        // An owner that is blocked/terminated cannot be mid-acquisition,
        // so no extra bit needs to be appended; a running owner gets
        // RESERVED_BITMASK appended (soft-unreservation handshake).
        if(owner->state &
                (TM_THREAD_STATE_TERMINATED |
                 TM_THREAD_STATE_WAITING |
                 TM_THREAD_STATE_WAITING_INDEFINITELY |
                 TM_THREAD_STATE_WAITING_WITH_TIMEOUT |
                 TM_THREAD_STATE_SLEEPING |
                 TM_THREAD_STATE_PARKED |
                 TM_THREAD_STATE_SUSPENDED |
                 TM_THREAD_STATE_IN_MONITOR_WAIT)) {
            append = 0;
        } else {
            append = RESERVED_BITMASK;
        }
        status=hythread_suspend_other(owner);
        if (status !=TM_ERROR_NONE) {
            return status;
        }
    } else {
        append = 0;
    }
    // Hard unreservation overrides the soft protocol when it is disabled.
    if(!tm_properties || !tm_properties->use_soft_unreservation) {
        append = RESERVED_BITMASK;
    }

    // prepare new unreserved lockword and try to CAS it with old one.
    while (IS_RESERVED(lockword)) {
        assert(!IS_FAT_LOCK(lockword));
        CTRACE(("unreserving lock"));
        if (RECURSION(lockword) != 0) {
            // Reserved locks pre-count one acquisition: drop one recursion
            // level while flipping the reservation bit.
            lockword_new = (lockword | RESERVED_BITMASK);
            assert(RECURSION(lockword) > 0);
            assert(RECURSION(lockword_new) > 0);
            RECURSION_DEC(&lockword_new, lockword_new);
        } else {
            // No recursion: keep only the low halfword (plus `append` bit).
            lockword_new = (lockword | append);
            lockword_new = lockword_new & 0x0000ffff;
        }
        if (lockword == apr_atomic_cas32 (((volatile apr_uint32_t*) lockword_ptr),
                (apr_uint32_t) lockword_new, lockword)) {
            CTRACE(("unreserved lock"));
            break;
        }
        // CAS lost; reload and retry.
        lockword = *lockword_ptr;
    }

    // resume owner
    if (owner) {
        hythread_yield_other(owner);
        hythread_resume(owner);
    }

    /* status = port_mutex_unlock(&TM_LOCK);*/

    // Gregory - This lock, right after it was unreserved, may be
    // inflated by another thread and therefore instead of recursion
    // count and reserved flag it will have the fat monitor ID. The
    // assertion !IS_RESERVED(lockword) fails in this case. So it is
    // necessary to check first that monitor is not fat.
    // To avoid race condition between checking two different
    // conditions inside of assert, the lockword contents has to be
    // loaded before checking.
    // lockword = *lockword_ptr;
    // assert(IS_FAT_LOCK(lockword) || !IS_RESERVED(lockword));

    return TM_ERROR_NONE;
}