Example #1
void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
{
    lc_thread_t *thr;
    lc_locked_lock_t *ll;

    if (lck->inited != ERTS_LC_INITITALIZED)
	uninitialized_lock();

    if (lck->id < 0)
	return;

    thr = get_my_locked_locks();

    if (thr) {
	ll = thr->required.first;
	if (find_lock(&ll, lck))
	    unlock_of_required_lock(thr, lck);
    }

    ll = thr->locked.first;
    if (!find_lock(&ll, lck))
	unlock_of_not_locked(thr, lck);
}
Example #2
void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
{
    erts_lc_locked_locks_t *l_lcks;
    erts_lc_locked_lock_t *l_lck;

    if (lck->inited != ERTS_LC_INITITALIZED)
	uninitialized_lock();

    if (lck->id < 0)
	return;

    l_lcks = get_my_locked_locks();

    if (l_lcks) {
	l_lck = l_lcks->required.first;
	if (find_lock(&l_lck, lck))
	    unlock_of_required_lock(l_lcks, lck);
    }

    l_lck = l_lcks->locked.first;
    if (!find_lock(&l_lck, lck))
	unlock_of_not_locked(l_lcks, lck);
}
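Both variants above walk a per-thread list of held locks through find_lock(&ll, lck), with the cursor left at the match. As a rough illustration only, the following self-contained sketch shows that cursor-style search; the lock_node type, the field widths, and the sketch_find_lock name are assumptions made for this example and are not taken from the OTP sources.

/* Hypothetical sketch -- not the OTP implementation.  Illustrates the
 * cursor-style list search the examples above rely on: *cur starts at
 * the head of a thread's lock list and is left at the matching node
 * (or NULL), so the caller can unlink it or keep walking. */
#include <stddef.h>

typedef struct lock_node {
    struct lock_node *next;
    struct lock_node *prev;
    int id;       /* lock class, like lck->id in the examples above   */
    long extra;   /* instance tag, like lck->extra in the examples    */
} lock_node;

/* Returns nonzero if a node matching (id, extra) exists; *cur is left
 * at the match, or at NULL when nothing matches. */
static int sketch_find_lock(lock_node **cur, int id, long extra)
{
    lock_node *n;
    for (n = *cur; n != NULL; n = n->next) {
        if (n->id == id && n->extra == extra) {
            *cur = n;
            return 1;
        }
    }
    *cur = NULL;
    return 0;
}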
Example #3
int
(ftrylockfile)(FILE * fp)
{
    int	ret = -1;
    int	idx = file_idx(fp);
    struct	file_lock	*p;

    /* Lock the hash table: */
    _spinlock(&hash_lock);

    /* Get a pointer to any existing lock for the file: */
    if ((p = find_lock(idx, fp)) == NULL) {
        /*
         * The file is not locked, so this thread can
         * grab the lock:
         */
        p = do_lock(idx, fp);

        /*
         * The file is already locked, so check if the
         * running thread is the owner:
         */
    } else if (p->owner == pthread_self()) {
        /*
         * The running thread is already the
         * owner, so increment the count of
         * the number of times it has locked
         * the file:
         */
        p->count++;
    } else {
        /*
         * The file is locked for another thread,
         * so this try fails.
         */
        p = NULL;
    }

    /* Unlock the hash table: */
    _spinunlock(&hash_lock);

    /* Check if the lock was obtained: */
    if (p != NULL)
        /* Return success: */
        ret = 0;

    return (ret);
}
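In this example find_lock(idx, fp) looks up the per-FILE lock record in a hash table indexed by file_idx(fp). The sketch below is a hypothetical stand-in, not the libc implementation: the bucket count, the pointer hash, and all sketch_* names are assumptions made purely for illustration.

/* Hypothetical sketch -- not the libc code.  Shows the kind of
 * hash-bucket lookup that find_lock(idx, fp) performs above: idx
 * selects a bucket and the chain is scanned for the entry whose
 * FILE pointer matches. */
#include <stdio.h>
#include <stdint.h>
#include <pthread.h>

#define SKETCH_NUM_BUCKETS 128

struct sketch_file_lock {
    struct sketch_file_lock *next;  /* bucket chain                 */
    FILE       *fp;                 /* stream this entry guards     */
    pthread_t   owner;              /* thread currently holding it  */
    int         count;              /* recursive lock count         */
};

static struct sketch_file_lock *sketch_buckets[SKETCH_NUM_BUCKETS];

/* Crude pointer hash standing in for file_idx(); assumption only. */
static int sketch_file_idx(FILE *fp)
{
    return (int)(((uintptr_t)fp >> 4) % SKETCH_NUM_BUCKETS);
}

/* Scan one bucket for the entry guarding fp; NULL means the stream
 * is currently unlocked. */
static struct sketch_file_lock *sketch_find_lock(int idx, FILE *fp)
{
    struct sketch_file_lock *p;
    for (p = sketch_buckets[idx]; p != NULL; p = p->next)
        if (p->fp == fp)
            return p;
    return NULL;
}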
Example #4
void
erts_lc_have_locks(int *resv, erts_lc_lock_t *locks, int len)
{
    erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
    int i;

    if (!l_lcks) {
	for (i = 0; i < len; i++)
	    resv[i] = 0;
    }
    else {
	erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
	for (i = 0; i < len; i++)
	    resv[i] = find_lock(&l_lck, &locks[i]);
    }
}
Example #5
void
erts_lc_have_locks(int *resv, erts_lc_lock_t *locks, int len)
{
    lc_thread_t *thr = get_my_locked_locks();
    int i;

    if (!thr) {
	for (i = 0; i < len; i++)
	    resv[i] = 0;
    }
    else {
	lc_locked_lock_t *ll = thr->locked.first;
	for (i = 0; i < len; i++)
	    resv[i] = find_lock(&ll, &locks[i]);
    }
}
Example #6
void
(flockfile)(FILE * fp)
{
    int	idx = file_idx(fp);
    struct	file_lock	*p;
    pthread_t	self = pthread_self();

    /* Lock the hash table: */
    _spinlock(&hash_lock);

    /* Get a pointer to any existing lock for the file: */
    if ((p = find_lock(idx, fp)) == NULL) {
        /*
         * The file is not locked, so this thread can
         * grab the lock:
         */
        do_lock(idx, fp);

        /*
         * The file is already locked, so check if the
         * running thread is the owner:
         */
    } else if (p->owner == self) {
        /*
         * The running thread is already the
         * owner, so increment the count of
         * the number of times it has locked
         * the file:
         */
        p->count++;
    } else {
        /*
         * The file is locked for another thread.
         * Append this thread to the queue of
         * threads waiting on the lock.
         */
        TAILQ_INSERT_TAIL(&p->lockers,self,waiting);
        while (p->owner != self) {
            __thrsleep(self, 0 | _USING_TICKETS, NULL,
                       &hash_lock.ticket, NULL);
            _spinlock(&hash_lock);
        }
    }

    /* Unlock the hash table: */
    _spinunlock(&hash_lock);
}
Example #7
void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags,
			      char *file, unsigned int line)
{
    erts_lc_locked_locks_t *l_lcks = make_my_locked_locks();
    erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
    if (!find_lock(&l_lck, lck))
	required_not_locked(l_lcks, lck);
    l_lck = new_locked_lock(lck, op_flags, file, line);
    if (!l_lcks->required.last) {
	ASSERT(!l_lcks->required.first);
	l_lck->next = l_lck->prev = NULL;
	l_lcks->required.first = l_lcks->required.last = l_lck;
    }
    else {
	erts_lc_locked_lock_t *l_lck2;
	ASSERT(l_lcks->required.first);
	for (l_lck2 = l_lcks->required.last;
	     l_lck2;
	     l_lck2 = l_lck2->prev) {
	    if (l_lck2->id < lck->id
		|| (l_lck2->id == lck->id && l_lck2->extra < lck->extra))
		break;
	    else if (l_lck2->id == lck->id && l_lck2->extra == lck->extra)
		require_twice(l_lcks, lck);
	}
	if (!l_lck2) {
	    l_lck->next = l_lcks->required.first;
	    l_lck->prev = NULL;
	    l_lcks->required.first->prev = l_lck;
	    l_lcks->required.first = l_lck;
	}
	else {
	    l_lck->next = l_lck2->next;
	    if (l_lck->next) {
		ASSERT(l_lcks->required.last != l_lck2);
		l_lck->next->prev = l_lck;
	    }
	    else {
		ASSERT(l_lcks->required.last == l_lck2);
		l_lcks->required.last = l_lck;
	    }
	    l_lck->prev = l_lck2;
	    l_lck2->next = l_lck;		
	}
    }
}
Example #8
void erts_lc_require_lock_flg(erts_lc_lock_t *lck, erts_lock_options_t options,
			      char *file, unsigned int line)
{
    lc_thread_t *thr = make_my_locked_locks();
    lc_locked_lock_t *ll = thr->locked.first;
    if (!find_lock(&ll, lck))
	required_not_locked(thr, lck);
    ll = new_locked_lock(thr, lck, options, file, line);
    if (!thr->required.last) {
	ASSERT(!thr->required.first);
	ll->next = ll->prev = NULL;
	thr->required.first = thr->required.last = ll;
    }
    else {
	lc_locked_lock_t *l_lck2;
	ASSERT(thr->required.first);
	for (l_lck2 = thr->required.last;
	     l_lck2;
	     l_lck2 = l_lck2->prev) {
	    if (l_lck2->id < lck->id
		|| (l_lck2->id == lck->id && l_lck2->extra < lck->extra))
		break;
	    else if (l_lck2->id == lck->id && l_lck2->extra == lck->extra)
		require_twice(thr, lck);
	}
	if (!l_lck2) {
	    ll->next = thr->required.first;
	    ll->prev = NULL;
	    thr->required.first->prev = ll;
	    thr->required.first = ll;
	}
	else {
	    ll->next = l_lck2->next;
	    if (ll->next) {
		ASSERT(thr->required.last != l_lck2);
		ll->next->prev = ll;
	    }
	    else {
		ASSERT(thr->required.last == l_lck2);
		thr->required.last = ll;
	    }
	    ll->prev = l_lck2;
	    l_lck2->next = ll;
	}
    }
}
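Both require_lock variants keep the required list sorted by (id, extra) and insert the new entry by scanning backwards from the tail. The standalone sketch below reproduces only that insertion step, with simplified, hypothetical types; it is not the OTP code.

/* Hypothetical sketch -- simplified types, not the OTP structures.
 * Scan backwards from the tail until an element ordered before the
 * new node is found, then splice the node in after it, or make it
 * the new head when no such element exists. */
#include <stddef.h>

typedef struct req_node {
    struct req_node *next, *prev;
    int id;
    long extra;
} req_node;

typedef struct { req_node *first, *last; } req_list;

static void sketch_insert_sorted(req_list *ls, req_node *nn)
{
    req_node *p;

    if (!ls->last) {                    /* empty list */
        nn->next = nn->prev = NULL;
        ls->first = ls->last = nn;
        return;
    }
    for (p = ls->last; p; p = p->prev)
        if (p->id < nn->id || (p->id == nn->id && p->extra < nn->extra))
            break;
    if (!p) {                           /* new node becomes the head */
        nn->prev = NULL;
        nn->next = ls->first;
        ls->first->prev = nn;
        ls->first = nn;
    } else {                            /* insert after p */
        nn->next = p->next;
        nn->prev = p;
        if (p->next)
            p->next->prev = nn;
        else
            ls->last = nn;
        p->next = nn;
    }
}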
Example #9
void erts_lc_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
{
    erts_lc_locked_locks_t *l_lcks;
    erts_lc_locked_lock_t *l_lck;

    if (lck->inited != ERTS_LC_INITITALIZED)
	uninitialized_lock();

    if (lck->id < 0)
	return;

    l_lcks = get_my_locked_locks();

    if (l_lcks) {
	l_lck = l_lcks->required.first;
	if (find_lock(&l_lck, lck))
	    unlock_of_required_lock(l_lcks, lck);
    }

    for (l_lck = l_lcks ? l_lcks->locked.last : NULL; l_lck; l_lck = l_lck->prev) {
	if (l_lck->id == lck->id && l_lck->extra == lck->extra) {
	    if ((l_lck->flags & ERTS_LC_FLG_LO_ALL) != op_flags)
		unlock_op_mismatch(l_lcks, lck, op_flags);
	    if (l_lck->prev)
		l_lck->prev->next = l_lck->next;
	    else
		l_lcks->locked.first = l_lck->next;
	    if (l_lck->next)
		l_lck->next->prev = l_lck->prev;
	    else
		l_lcks->locked.last = l_lck->prev;
	    lc_free((void *) l_lck);
	    return;
	}
    }
    
    unlock_of_not_locked(l_lcks, lck);
}
Example #10
void
(funlockfile)(FILE * fp)
{
    int	idx = file_idx(fp);
    struct	file_lock	*p;

    /* Lock the hash table: */
    _spinlock(&hash_lock);

    /*
     * Get a pointer to the lock for the file and check that
     * the running thread is the one with the lock:
     */
    if ((p = find_lock(idx, fp)) != NULL && p->owner == pthread_self()) {
        /*
         * Check if this thread has locked the FILE
         * more than once:
         */
        if (--p->count == 0) {
            /* Get the new owner of the lock: */
            if ((p->owner = TAILQ_FIRST(&p->lockers)) != NULL) {
                /* Pop the thread off the queue: */
                TAILQ_REMOVE(&p->lockers,p->owner,waiting);

                /*
                 * This is the first lock for the new
                 * owner:
                 */
                p->count = 1;

                __thrwakeup(p->owner, 1);
            }
        }
    }

    /* Unlock the hash table: */
    _spinunlock(&hash_lock);
}
Example #11
void erts_lc_unlock_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
{
    lc_thread_t *thr;
    lc_locked_lock_t *ll;

    if (lck->inited != ERTS_LC_INITITALIZED)
	uninitialized_lock();

    if (lck->id < 0)
	return;

    thr = get_my_locked_locks();

    if (thr) {
	ll = thr->required.first;
	if (find_lock(&ll, lck))
	    unlock_of_required_lock(thr, lck);
    }

    for (ll = thr ? thr->locked.last : NULL; ll; ll = ll->prev) {
	if (ll->id == lck->id && ll->extra == lck->extra) {
	    if ((ll->taken_options & ERTS_LOCK_OPTIONS_RDWR) != options)
		unlock_op_mismatch(thr, lck, options);
	    if (ll->prev)
		ll->prev->next = ll->next;
	    else
		thr->locked.first = ll->next;
	    if (ll->next)
		ll->next->prev = ll->prev;
	    else
		thr->locked.last = ll->prev;
	    lc_free(thr, ll);
	    return;
	}
    }
    
    unlock_of_not_locked(thr, lck);
}
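The unlock path in the two examples above scans the per-thread locked list from the tail and splices out the matching (id, extra) entry. A minimal sketch of just that unlink step, again with hypothetical simplified types rather than the OTP structures:

/* Hypothetical sketch -- not the OTP code.  Reverse scan plus unlink:
 * walk the list from the tail and, when the matching node is found,
 * fix up the neighbours (or the list head/tail) and free it. */
#include <stddef.h>
#include <stdlib.h>

typedef struct lk_node {
    struct lk_node *next, *prev;
    int id;
    long extra;
} lk_node;

typedef struct { lk_node *first, *last; } lk_list;

/* Returns 1 if a matching node was found and removed, 0 otherwise. */
static int sketch_unlink_lock(lk_list *ls, int id, long extra)
{
    lk_node *n;
    for (n = ls->last; n; n = n->prev) {
        if (n->id == id && n->extra == extra) {
            if (n->prev)
                n->prev->next = n->next;
            else
                ls->first = n->next;
            if (n->next)
                n->next->prev = n->prev;
            else
                ls->last = n->prev;
            free(n);
            return 1;
        }
    }
    return 0;
}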
Example #12
static char*
find_and_remove_message_item(int message_id, int sid,
							 bool all, bool remove_all,
							 bool filter_message,
							 int *sleep, char **event_name)
{
	alert_lock *alck;
	int _message_id;

	char *result = NULL;
	if (sleep != NULL)
		*sleep = 0;

	alck = find_lock(sid, false);

	if (event_name)
		*event_name = NULL;

	if (alck != NULL && alck->echo != NULL)
	{
		/* if I have registered and created item */
		struct _message_echo *echo, *last_echo;

		echo = alck->echo;
		last_echo = NULL;

		while (echo != NULL)
		{
			char *message_text;
			bool destroy_msg_item = false;

			if (filter_message && echo->message_id != message_id)
			{
				last_echo = echo;
				echo = echo->next_echo;
				continue;
			}

			message_text = echo->message->message;
			_message_id = echo->message_id;

			if (!remove_receiver(echo->message, sid))
			{
				destroy_msg_item = true;
				if (echo->message->prev_message != NULL)
					echo->message->prev_message->next_message =
						echo->message->next_message;
				else
					events[echo->message_id].messages =
						echo->message->next_message;
				if (echo->message->next_message != NULL)
					echo->message->next_message->prev_message =
						echo->message->prev_message;
				ora_sfree(echo->message->receivers);
				ora_sfree(echo->message);
			}
			if (last_echo == NULL)
			{
				alck->echo = echo->next_echo;
				ora_sfree(echo);
				echo = alck->echo;
			}
			else
			{
				last_echo->next_echo = echo->next_echo;
				ora_sfree(echo);
				echo = last_echo;
			}
			if (remove_all)
			{
				if (message_text != NULL && destroy_msg_item)
					ora_sfree(message_text);

				continue;
			}
			else if (_message_id == message_id || all)
			{
				/* I have to do local copy */
				if (message_text)
				{
					result = pstrdup(message_text);
					if (destroy_msg_item)
						ora_sfree(message_text);
				}

				if (event_name != NULL)
					*event_name = pstrdup(events[_message_id].event_name);

				break;
			}
		}
	}

	return result;
}
Example #13
void 
funlockfile(FILE * fp)
{
	int	idx = file_idx(fp);
	struct	file_lock	*p;

	/*
	 * Defer signals to protect the scheduling queues from
	 * access by the signal handler:
	 */
	_thread_kern_sig_defer();

	/* Lock the hash table: */
	_SPINLOCK(&hash_lock);

	/*
	 * Get a pointer to the lock for the file and check that
	 * the running thread is the one with the lock:
	 */
	if ((p = find_lock(idx, fp)) != NULL &&
	    p->owner == _thread_run) {
		/*
		 * Check if this thread has locked the FILE
		 * more than once:
		 */
		if (p->count > 1)
			/*
			 * Decrement the count of the number of
			 * times the running thread has locked this
			 * file:
			 */
			p->count--;
		else {
			/*
			 * The running thread will release the
			 * lock now:
			 */
			p->count = 0;

			/* Get the new owner of the lock: */
			if ((p->owner = TAILQ_FIRST(&p->l_head)) != NULL) {
				/* Pop the thread off the queue: */
				TAILQ_REMOVE(&p->l_head,p->owner,qe);

				/*
				 * This is the first lock for the new
				 * owner:
				 */
				p->count = 1;

				/* Allow the new owner to run: */
				PTHREAD_NEW_STATE(p->owner,PS_RUNNING);
			}
		}
	}

	/* Unlock the hash table: */
	_SPINUNLOCK(&hash_lock);

	/*
	 * Undefer and handle pending signals, yielding if
	 * necessary:
	 */
	_thread_kern_sig_undefer();
}
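Both funlockfile versions hand the lock directly to the first queued waiter once the recursive count reaches zero. The sketch below shows that hand-off in isolation, assuming a hypothetical singly-linked FIFO of waiters; the types and names are illustrative rather than the libc ones, and the caller is assumed to hold the table lock and to wake the returned waiter afterwards.

/* Hypothetical sketch -- not the libc code.  When the recursive count
 * drops to zero, the first queued waiter becomes the owner with a
 * count of one; otherwise the count is simply decremented. */
#include <stddef.h>
#include <pthread.h>

struct waiter {
    struct waiter *next;
    pthread_t      thread;
};

struct sketch_lock {
    pthread_t      owner;
    int            count;          /* recursion depth          */
    struct waiter *head, *tail;    /* FIFO of waiting threads  */
};

/* Caller holds the (hypothetical) table lock.  Returns the waiter that
 * should be woken, or NULL when no hand-off takes place. */
static struct waiter *sketch_release(struct sketch_lock *l)
{
    struct waiter *w;

    if (--l->count > 0)
        return NULL;               /* still recursively held */

    if (l->head == NULL) {
        l->count = 0;              /* lock becomes free */
        return NULL;
    }

    /* Pop the first waiter and hand the lock straight to it. */
    w = l->head;
    l->head = w->next;
    if (l->head == NULL)
        l->tail = NULL;
    l->owner = w->thread;
    l->count = 1;
    return w;                      /* caller wakes this thread */
}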