Example #1
int cf_queue_priority_pop(cf_queue_priority *q, void *buf, int ms_wait) {
	QUEUE_LOCK(q);

	struct timespec tp;
	if (ms_wait > 0) {
		clock_gettime(CLOCK_REALTIME, &tp);
		tp.tv_sec += ms_wait / 1000;
		tp.tv_nsec += (ms_wait % 1000) * 1000000;
		if (tp.tv_nsec >= 1000000000) {
			tp.tv_nsec -= 1000000000;
			tp.tv_sec++;
		}
	}

	if (q->threadsafe) {
#ifdef EXTERNAL_LOCKS
		if (CF_Q_PRI_EMPTY(q)) {
			QUEUE_UNLOCK(q);
			return CF_QUEUE_EMPTY;
		}
#else
		while (CF_Q_PRI_EMPTY(q)) {
			if (CF_QUEUE_FOREVER == ms_wait) {
				pthread_cond_wait(&q->CV, &q->LOCK);
			}
			else if (CF_QUEUE_NOWAIT == ms_wait) {
				pthread_mutex_unlock(&q->LOCK);
				return(CF_QUEUE_EMPTY);
			}
			else {
				pthread_cond_timedwait(&q->CV, &q->LOCK, &tp);
				if (CF_Q_PRI_EMPTY(q)) {
					pthread_mutex_unlock(&q->LOCK);
					return(CF_QUEUE_EMPTY);
				}
			}
		}
#endif // EXTERNAL_LOCKS
	}
	
	int rv;
	if (CF_Q_SZ(q->high_q))
		rv = cf_queue_pop(q->high_q, buf, 0);
	else if (CF_Q_SZ(q->medium_q))
		rv = cf_queue_pop(q->medium_q, buf, 0);
	else if (CF_Q_SZ(q->low_q))
		rv = cf_queue_pop(q->low_q, buf, 0);
	else rv = CF_QUEUE_EMPTY;
		
	QUEUE_UNLOCK(q);

	return(rv);
}
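A minimal usage sketch (not from the source) showing the three wait modes accepted by cf_queue_priority_pop(). The constants CF_QUEUE_FOREVER, CF_QUEUE_NOWAIT, CF_QUEUE_EMPTY, and CF_QUEUE_OK appear in the examples on this page; the header name and everything else here are illustrative assumptions.

#include "cf_queue.h" // assumed header name for the cf_queue API

static void drain_example(cf_queue_priority *q)
{
	int item;

	// Block until an element arrives, highest priority first.
	if (cf_queue_priority_pop(q, &item, CF_QUEUE_FOREVER) == CF_QUEUE_OK) {
		// got an item
	}

	// Return immediately if all three inner queues are empty.
	if (cf_queue_priority_pop(q, &item, CF_QUEUE_NOWAIT) == CF_QUEUE_EMPTY) {
		// nothing queued right now
	}

	// Wait at most 100 ms for an element to show up.
	if (cf_queue_priority_pop(q, &item, 100) == CF_QUEUE_EMPTY) {
		// timed out
	}
}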
Example #2
/* cf_queue_push_head
 * Push to the front of the queue. In the worst case this means shifting the
 * entire queue contents back by one slot, which is not optimal but performs
 * acceptably in most cases.
 * */
int
cf_queue_push_head(cf_queue *q, void *ptr)
{

    /* FIXME error */
    if (q->threadsafe && (0 != pthread_mutex_lock(&q->LOCK)))
        return(-1);

    /* Check queue length */
    if (CF_Q_SZ(q) == q->allocsz) {
        /* resize is a pain for circular buffers */
        if (0 != cf_queue_resize(q, q->allocsz * 2)) {
            if (q->threadsafe)
                pthread_mutex_unlock(&q->LOCK);
            //cf_warning(CF_QUEUE, "queue resize failure");
            return(-1);
        }
    }

    // easy case, tail insert is head insert
    if (q->read_offset == q->write_offset) {
        memcpy(CF_Q_ELEM_PTR(q,q->write_offset), ptr, q->elementsz);
        q->write_offset++;
    }
    // another easy case, there's space up front
    else if (q->read_offset > 0) {
        q->read_offset--;
        memcpy(CF_Q_ELEM_PTR(q,q->read_offset), ptr, q->elementsz);
    }
    // hard case, we're going to have to shift everything back
    else {
        memmove(CF_Q_ELEM_PTR(q, 1),CF_Q_ELEM_PTR(q, 0),q->elementsz * CF_Q_SZ(q) );
        memcpy(CF_Q_ELEM_PTR(q,0), ptr, q->elementsz);
        q->write_offset++;
    }

    // we're at risk of overflow if the write offset is that high
    if (q->write_offset & 0xC0000000) cf_queue_unwrap(q);

    if (q->threadsafe)
        pthread_cond_signal(&q->CV);

    /* FIXME blow a gasket */
    if (q->threadsafe && (0 != pthread_mutex_unlock(&q->LOCK)))
        return(-1);

    return(0);
}
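To make the three head-insert cases concrete, here is a standalone sketch (assumptions, not library code) of the same offset arithmetic on a plain int array. Capacity checks are omitted because the real code resizes before inserting into a full queue.

#include <string.h>

#define CAP 8

static int buf[CAP];
static unsigned read_off = 0, write_off = 0; // element count = write_off - read_off

static void push_head(int v)
{
	if (read_off == write_off) {
		// empty queue: a head insert is the same as a tail insert
		buf[write_off % CAP] = v;
		write_off++;
	}
	else if (read_off > 0) {
		// space up front: step the read offset back and write there
		read_off--;
		buf[read_off % CAP] = v;
	}
	else {
		// read_off == 0: contents are contiguous, shift them back one slot
		memmove(&buf[1], &buf[0], (write_off - read_off) * sizeof(int));
		buf[0] = v;
		write_off++;
	}
}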
Example #3
/* cf_queue_push_limit
 * Push element on the queue only if size < limit.
 * */
bool cf_queue_push_limit(cf_queue *q, void *ptr, uint32_t limit) {
	QUEUE_LOCK(q);
	uint32_t size = CF_Q_SZ(q);

	if (size >= limit) {
		QUEUE_UNLOCK(q);
		return false;
	}

	/* Check queue length */
	if (size == q->allocsz) {
		/* resize is a pain for circular buffers */
		if (0 != cf_queue_resize(q, q->allocsz + CF_QUEUE_ALLOCSZ)) {
			QUEUE_UNLOCK(q);
			return false;
		}
	}

	memcpy(CF_Q_ELEM_PTR(q,q->write_offset), ptr, q->elementsz);
	q->write_offset++;
	// we're at risk of overflow if the write offset is that high
	if (q->write_offset & 0xC0000000) cf_queue_unwrap(q);

#ifndef EXTERNAL_LOCKS
	if (q->threadsafe)
		pthread_cond_signal(&q->CV);
#endif

	QUEUE_UNLOCK(q);
	return true;
}
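A hypothetical usage sketch: cf_queue_push_limit() as simple back-pressure for a producer. The queue handle, job type, and limit are illustrative.

#include <stdbool.h>
#include "cf_queue.h" // assumed header name

// Refuse new work once 1024 jobs are pending, instead of growing the queue.
static bool try_enqueue_job(cf_queue *job_q, int job_id)
{
	return cf_queue_push_limit(job_q, &job_id, 1024);
}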
Example #4
/* cf_queue_push
 * Push an element onto the tail of the queue.
 * */
int cf_queue_push(cf_queue *q, void *ptr) {
	/* FIXME arg check - and how do you do that, boyo? Magic numbers? */

	/* FIXME error */

	QUEUE_LOCK(q);

	/* Check queue length */
	if (CF_Q_SZ(q) == q->allocsz) {
		/* resize is a pain for circular buffers */
		if (0 != cf_queue_resize(q, q->allocsz + CF_QUEUE_ALLOCSZ)) {
			QUEUE_UNLOCK(q);
			return(-1);
		}
	}

	// todo: if queues are power of 2, this can be a shift
	memcpy(CF_Q_ELEM_PTR(q,q->write_offset), ptr, q->elementsz);
	q->write_offset++;
	// we're at risk of overflow if the write offset is that high
	if (q->write_offset & 0xC0000000) cf_queue_unwrap(q);

#ifndef EXTERNAL_LOCKS	
	if (q->threadsafe)
		pthread_cond_signal(&q->CV);
#endif 

	QUEUE_UNLOCK(q);

	return(0);
}
Example #5
//
// This assumes the element we're looking for is unique! Returns
// CF_QUEUE_NOMATCH if the element is not found or not moved.
//
int cf_queue_priority_change(cf_queue_priority *priority_q, const void *ptr, int new_pri)
{
	cf_queue_priority_lock(priority_q);

	cf_queue *queues[3];

	queues[0] = priority_q->high_q;
	queues[1] = priority_q->medium_q;
	queues[2] = priority_q->low_q;

	int dest_q_itr = CF_QUEUE_PRIORITY_HIGH - new_pri;
	cf_queue *q;

	for (int q_itr = 0; q_itr < 3; q_itr++) {
		q = queues[q_itr];

		if (q_itr == dest_q_itr || CF_Q_SZ(q) == 0) {
			continue;
		}

		for (uint32_t i = q->read_offset; i < q->write_offset; i++) {
			if (memcmp(CF_Q_ELEM_PTR(q, i), ptr, q->element_sz) == 0) {
				// Move it to the queue with desired priority.
				cf_queue_delete_offset(q, i);
				cf_queue_push(queues[dest_q_itr], ptr);

				cf_queue_priority_unlock(priority_q);
				return CF_QUEUE_OK;
			}
		}
	}

	cf_queue_priority_unlock(priority_q);
	return CF_QUEUE_NOMATCH;
}
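A short hypothetical usage sketch for the function above. Because matching is done with memcmp(), the value passed in must be byte-comparable and unique in the queue, e.g. a plain id with no padding.

#include "cf_queue.h" // assumed header name

static void promote_job(cf_queue_priority *pq, int job_id)
{
	// Move the (unique) job to the high-priority inner queue.
	if (cf_queue_priority_change(pq, &job_id, CF_QUEUE_PRIORITY_HIGH) == CF_QUEUE_NOMATCH) {
		// job_id is not queued, or is already at high priority
	}
}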
Example #6
/* cf_queue_push
 * Push an element onto the tail of the queue.
 * */
int
cf_queue_push(cf_queue *q, void *ptr)
{
    /* FIXME arg check - and how do you do that, boyo? Magic numbers? */

    /* FIXME error */
    if (q->threadsafe && (0 != pthread_mutex_lock(&q->LOCK)))
        return(-1);

    /* Check queue length */
    if (CF_Q_SZ(q) == q->allocsz) {
        /* resize is a pain for circular buffers */
        if (0 != cf_queue_resize(q, q->allocsz * 2)) {
            if (q->threadsafe)
                pthread_mutex_unlock(&q->LOCK);
            // cf_warning(CF_QUEUE, "queue resize failure");
            return(-1);
        }
    }

    // todo: if queues are power of 2, this can be a shift
    memcpy(CF_Q_ELEM_PTR(q,q->write_offset), ptr, q->elementsz);
    q->write_offset++;
    // we're at risk of overflow if the write offset is that high
    if (q->write_offset & 0xC0000000) cf_queue_unwrap(q);

    if (q->threadsafe)
        pthread_cond_signal(&q->CV);

    /* FIXME blow a gasket */
    if (q->threadsafe && (0 != pthread_mutex_unlock(&q->LOCK)))
        return(-1);

    return(0);
}
Example #7
int cf_queue_delete(cf_queue *q, void *buf, bool only_one) {
	if (NULL == q)
		return(CF_QUEUE_ERR);

	QUEUE_LOCK(q);
	
	bool found = false;
	
	if (CF_Q_SZ(q)) {

		for (uint32_t i = q->read_offset; i < q->write_offset; i++) {
			int rv = memcmp(CF_Q_ELEM_PTR(q, i), buf, q->elementsz);

			if (rv == 0) { // delete!
				cf_queue_delete_offset(q, i);
				found = true;

				if (only_one) {
					goto Done;
				}
			}
		}
	}

Done:
	QUEUE_UNLOCK(q);

	if (found == false)
		return(CF_QUEUE_EMPTY);
	else
		return(CF_QUEUE_OK);
}
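Hypothetical usage of cf_queue_delete(). Note that it matches elements byte-for-byte with memcmp(), and that "not found" is reported as CF_QUEUE_EMPTY rather than a distinct code.

#include <stdbool.h>
#include "cf_queue.h" // assumed header name

static bool cancel_job(cf_queue *q, int job_id)
{
	// only_one == true: stop after the first byte-identical match.
	return cf_queue_delete(q, &job_id, true) == CF_QUEUE_OK;
}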
Example #8
// We have to guard against offset wraparound, so call this occasionally.
// I really expect this will never get called...
// HOWEVER, it can be a symptom of a queue getting really, really deep.
//
void
cf_queue_unwrap(cf_queue *q)
{
    // cf_debug(CF_QUEUE, " queue memory unwrap!!!! queue: %p",q);
    int sz = CF_Q_SZ(q);
    q->read_offset %= q->allocsz;
    q->write_offset = q->read_offset + sz;
}
Example #9
int cf_queue_priority_pop(cf_queue_priority *q, void *buf, int ms_wait)
{
	cf_queue_priority_lock(q);

	struct timespec tp;

	if (ms_wait > 0) {
		cf_set_wait_timespec(ms_wait, &tp);
	}

	if (q->threadsafe) {
		while (CF_Q_PRI_EMPTY(q)) {
			if (CF_QUEUE_FOREVER == ms_wait) {
				pthread_cond_wait(&q->CV, &q->LOCK);
			}
			else if (CF_QUEUE_NOWAIT == ms_wait) {
				pthread_mutex_unlock(&q->LOCK);
				return CF_QUEUE_EMPTY;
			}
			else {
				pthread_cond_timedwait(&q->CV, &q->LOCK, &tp);

				if (CF_Q_PRI_EMPTY(q)) {
					pthread_mutex_unlock(&q->LOCK);
					return CF_QUEUE_EMPTY;
				}
			}
		}
	}

	int rv = CF_QUEUE_EMPTY;

	if (CF_Q_SZ(q->high_q)) {
		rv = cf_queue_pop(q->high_q, buf, 0);
	}
	else if (CF_Q_SZ(q->medium_q)) {
		rv = cf_queue_pop(q->medium_q, buf, 0);
	}
	else if (CF_Q_SZ(q->low_q)) {
		rv = cf_queue_pop(q->low_q, buf, 0);
	}

	cf_queue_priority_unlock(q);
	return rv;
}
Example #10
int cf_queue_sz(cf_queue *q) {
	int rv;

	QUEUE_LOCK(q);
	rv = CF_Q_SZ(q);
	QUEUE_UNLOCK(q);

	return(rv);
}
Example #11
int
cf_queue_sz(cf_queue *q)
{
    int rv;

    if (q->threadsafe)
        pthread_mutex_lock(&q->LOCK);
    rv = CF_Q_SZ(q);
    if (q->threadsafe)
        pthread_mutex_unlock(&q->LOCK);
    return(rv);
}
Example #12
int
cf_queue_reduce(cf_queue *q,  cf_queue_reduce_fn cb, void *udata)
{
    if (NULL == q)
        return(-1);

    /* FIXME error checking */
    if (q->threadsafe && (0 != pthread_mutex_lock(&q->LOCK)))
        return(-1);

    if (CF_Q_SZ(q)) {

        // it would be faster to have a local variable to hold the index,
        // and do it in bytes or something, but a delete
        // will change the read and write offset, so this is simpler for now
        // can optimize if necessary later....

        for (uint32_t i = q->read_offset; i < q->write_offset; i++) {
            int rv = cb(CF_Q_ELEM_PTR(q, i), udata);

            // rv == 0 is the normal case - just move on to the next element
            if (rv == -1) {
                break; // found what it was looking for
            }
            else if (rv == -2) { // delete!
                cf_queue_delete_offset(q, i);
                goto Found;
            }
        }
    }

Found:
    if (q->threadsafe && (0 != pthread_mutex_unlock(&q->LOCK))) {
        fprintf(stderr, "unlock failed\n");
        return(-1);
    }

    return(0);
}
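A sketch of a reduce callback matching the convention in the code above: return 0 to keep scanning, -1 to stop, -2 to delete the current element and stop. The int element type and the callback name are illustrative.

static int find_and_remove_cb(void *element, void *udata)
{
    int target = *(int *)udata;

    if (*(int *)element == target) {
        return -2; // delete this element; the reduce jumps to Found
    }

    return 0; // normal case: keep scanning
}

// Usage: int target = 42; cf_queue_reduce(q, find_and_remove_cb, &target);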
Example #13
//
// Reduce the inner queues whose priorities differ from 'new_pri'. If the
// callback returns -1, move that element to the inner queue whose priority is
// 'new_pri' and return CF_QUEUE_OK. Return CF_QUEUE_NOMATCH if the callback
// never triggers a move.
//
int cf_queue_priority_reduce_change(cf_queue_priority *priority_q, int new_pri, cf_queue_reduce_fn cb, void *udata)
{
	cf_queue_priority_lock(priority_q);

	cf_queue *queues[3];

	queues[0] = priority_q->high_q;
	queues[1] = priority_q->medium_q;
	queues[2] = priority_q->low_q;

	int dest_q_itr = CF_QUEUE_PRIORITY_HIGH - new_pri;
	cf_queue *q;

	for (int q_itr = 0; q_itr < 3; q_itr++) {
		q = queues[q_itr];

		if (q_itr == dest_q_itr || CF_Q_SZ(q) == 0) {
			continue;
		}

		for (uint32_t i = q->read_offset; i < q->write_offset; i++) {
			int rv = cb(CF_Q_ELEM_PTR(q, i), udata);

			if (rv == 0) {
				continue;
			}

			if (rv == -1) {
				// Found it - move to desired queue and return.
				uint8_t* buf = alloca(q->element_sz);

				memcpy(buf, CF_Q_ELEM_PTR(q, i), q->element_sz);
				cf_queue_delete_offset(q, i);
				cf_queue_push(queues[dest_q_itr], buf);

				cf_queue_priority_unlock(priority_q);
				return CF_QUEUE_OK;
			}
		}
	}

	cf_queue_priority_unlock(priority_q);
	return CF_QUEUE_NOMATCH;
}
Example #14
//
// Internal function. Call with new size with lock held.
// *** THIS ONLY WORKS ON FULL QUEUES ***
//
int
cf_queue_resize(cf_queue *q, uint new_sz)
{
    // check - a lot of the code explodes badly if queue is not full
    if (CF_Q_SZ(q) != q->allocsz) {
        // cf_info(CF_QUEUE,"cf_queue: internal error: resize on non-full queue");
        return(-1);
    }

    // the rare case where the queue is not fragmented, and realloc makes sense
    // and none of the offsets need to move
    if (0 == q->read_offset % q->allocsz) {
        uint8_t *newq = realloc(q->queue, new_sz * q->elementsz);
        if (!newq) {
            // cf_info(CF_QUEUE," queue memory failure");
            return(-1);
        }
        // assign only on success, so a failed realloc doesn't leak the old buffer
        q->queue = newq;
        q->read_offset = 0;
        q->write_offset = q->allocsz;
    }
    else {

        uint8_t *newq = malloc(new_sz * q->elementsz);
        if (!newq) {
            // cf_info(CF_QUEUE," queue resize memory failure");
            return(-1);
        }
        // endsz is the used bytes in the old queue from the read point to the end
        uint endsz = (q->allocsz - (q->read_offset % q->allocsz)) * q->elementsz;
        memcpy(&newq[0], CF_Q_ELEM_PTR(q, q->read_offset), endsz);
        memcpy(&newq[endsz], &q->queue[0], (q->allocsz * q->elementsz) - endsz);

        free(q->queue);
        q->queue = newq;

        q->write_offset = q->allocsz;
        q->read_offset = 0;
    }

    q->allocsz = new_sz;
    return(0);
}
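To make the fragmented branch concrete, here is a standalone sketch (assumptions, not library code) of the same two-memcpy unwrap: the bytes from the read point to the physical end of the old buffer go first, then the wrapped bytes from its start.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static uint8_t *grow_unwrapped(const uint8_t *oldq, size_t allocsz,
                               size_t elementsz, size_t read_offset,
                               size_t new_sz)
{
    uint8_t *newq = malloc(new_sz * elementsz);
    if (!newq) {
        return NULL;
    }

    // Used bytes from the read point to the physical end of the old buffer.
    size_t endsz = (allocsz - (read_offset % allocsz)) * elementsz;

    memcpy(&newq[0], &oldq[(read_offset % allocsz) * elementsz], endsz);
    memcpy(&newq[endsz], &oldq[0], allocsz * elementsz - endsz);

    // The caller then sets read_offset = 0 and write_offset = allocsz.
    return newq;
}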
Example #15
/**
 * Use this function to find an element to pop from the queue using a reduce
 * callback function. Have the callback function return -1 when you want to pop
 * the element and stop reducing. If you have not popped an element,
 * CF_QUEUE_NOMATCH is returned.
 */
int cf_queue_priority_reduce_pop(cf_queue_priority *priority_q, void *buf, cf_queue_reduce_fn cb, void *udata)
{
	cf_queue_priority_lock(priority_q);

	cf_queue *queues[3];

	queues[0] = priority_q->high_q;
	queues[1] = priority_q->medium_q;
	queues[2] = priority_q->low_q;

	cf_queue *q;

	for (int q_itr = 0; q_itr < 3; q_itr++) {
		q = queues[q_itr];

		if (CF_Q_SZ(q) == 0) {
			continue;
		}

		for (uint32_t i = q->read_offset; i < q->write_offset; i++) {
			int rv = cb(CF_Q_ELEM_PTR(q, i), udata);

			if (rv == 0) {
				continue;
			}

			if (rv == -1) {
				// Found an element, so copy to 'buf', delete from q, and return.
				memcpy(buf, CF_Q_ELEM_PTR(q, i), q->element_sz);
				cf_queue_delete_offset(q, i);

				cf_queue_priority_unlock(priority_q);
				return CF_QUEUE_OK;
			}
		}
	}

	cf_queue_priority_unlock(priority_q);
	return CF_QUEUE_NOMATCH;
}
Example #16
int
cf_queue_delete(cf_queue *q, void *buf, bool only_one)
{
    if (NULL == q)
        return(CF_QUEUE_ERR);

    /* FIXME error checking */
    if (q->threadsafe && (0 != pthread_mutex_lock(&q->LOCK)))
        return(CF_QUEUE_ERR);

    bool found = false;

    if (CF_Q_SZ(q)) {

        for (uint32_t i = q->read_offset; i < q->write_offset; i++) {
            int rv = memcmp(CF_Q_ELEM_PTR(q, i), buf, q->elementsz);

            if (rv == 0) { // delete!
                cf_queue_delete_offset(q, i);
                found = true;

                if (only_one) {
                    goto Done;
                }
            }
        }
    }

Done:
    if (q->threadsafe && (0 != pthread_mutex_unlock(&q->LOCK))) {
        fprintf(stderr, "unlock failed\n");
        return(CF_QUEUE_ERR);
    }

    if (found == false)
        return(CF_QUEUE_EMPTY);
    else
        return(CF_QUEUE_OK);
}
Example #17
int cf_queue_reduce(cf_queue *q,  cf_queue_reduce_fn cb, void *udata) {
	if (NULL == q)
		return(-1);

	QUEUE_LOCK(q);	

	if (CF_Q_SZ(q)) {
		
		// it would be faster to have a local variable to hold the index,
		// and do it in bytes or something, but a delete
		// will change the read and write offset, so this is simpler for now
		// can optimize if necessary later....
		
		for (uint32_t i = q->read_offset; i < q->write_offset; i++) {
			int rv = cb(CF_Q_ELEM_PTR(q, i), udata);

			// rv == 0 is the normal case - just move on to the next element
			if (rv == -1) {
				break; // found what it was looking for
			}
			else if (rv == -2) { // delete!
				cf_queue_delete_offset(q, i);
				goto Found;
			}
		}
	}
	
Found:
	QUEUE_UNLOCK(q);

	return(0);
}
Example #18
// We have to guard against offset wraparound, so call this occasionally.
// I really expect this will never get called...
// HOWEVER, it can be a symptom of a queue getting really, really deep.
//
void cf_queue_unwrap(cf_queue *q) {
	int sz = CF_Q_SZ(q);
	q->read_offset %= q->allocsz;
	q->write_offset = q->read_offset + sz;
}
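A worked example of the arithmetic above, with illustrative numbers. The offsets grow without bound and only wrap via the modulo inside CF_Q_ELEM_PTR, so unwrap just renormalizes both counters while preserving their difference:

// allocsz = 8, read_offset = 0xC0000002, write_offset = 0xC0000005
// sz = write_offset - read_offset = 3
// read_offset  %= 8      ->  2
// write_offset  = 2 + 3  ->  5
//
// Every physical slot (offset % allocsz) is unchanged; only the absolute
// counters drop back below the 0xC0000000 danger zone tested by the push paths.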
Example #19
/**
 * Use this function to find an element to pop from the queue using a reduce
 * callback function. Have the callback function return -1 when you know you
 * want to pop the element immediately, return -2 when the element is the best
 * candidate for popping found so far but you want to keep looking, and
 * return 0 when you are not interested in popping the element. You then pop
 * the best candidate you've found - either the "-1 case" or the last
 * "-2 case". If you have not found a suitable candidate, CF_QUEUE_NOMATCH is
 * returned.
 */
int cf_queue_priority_reduce_pop(cf_queue_priority *priority_q,  void *buf, cf_queue_reduce_fn cb, void *udata)
{
    if (NULL == priority_q)
        return(-1);
	
    if (priority_q->threadsafe && (0 != pthread_mutex_lock(&priority_q->LOCK)))
        return(-1);
	
    int rv = 0;
	
    cf_queue *queues[3];
    queues[0] = priority_q->high_q;
    queues[1] = priority_q->medium_q;
    queues[2] = priority_q->low_q;
	
    cf_queue *q;
    int found_index = -1;
	
    for (int q_itr = 0; q_itr < 3; q_itr++)
    {
        q = queues[q_itr];
		
        if (CF_Q_SZ(q)) {
			
            // it would be faster to have a local variable to hold the index,
            // and do it in bytes or something, but a delete
            // will change the read and write offset, so this is simpler for now
            // can optimize if necessary later....
			
            for (uint32_t i = q->read_offset; i < q->write_offset; i++)
            {
				
                rv = cb(CF_Q_ELEM_PTR(q, i), udata);
				
                // rv == 0 is normal case, just increment to next point
                if (rv == -1) {
                    found_index = i;
                    break; // found what it was looking for, so break
                }
                else if (rv == -2) {
                    // found new candidate, but keep looking for one better
                    found_index = i;
                }
				
            }
			
            break; // only traverse the highest priority q
        }
    }
	
    if (found_index >= 0) {
        // found an element, so memcpy to buf, delete from q, and return
        memcpy(buf, CF_Q_ELEM_PTR(q, found_index), q->elementsz);
        cf_queue_delete_offset(q, found_index);
    }
	
    if (priority_q->threadsafe && (0 != pthread_mutex_unlock(&priority_q->LOCK))) {
        return(-1);
    }
	
    if (found_index == -1)
        return(CF_QUEUE_NOMATCH);
	
    return(0);
}
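A sketch of a "best candidate" callback following the -1/-2/0 convention documented above. The job record and deadline logic are hypothetical; state for the comparison is carried through udata.

typedef struct {
    int id;
    int deadline;       // 0 means due immediately
} job;

typedef struct {
    int best_deadline;  // smallest deadline seen so far; start at INT_MAX
} urgency_state;

static int pop_most_urgent_cb(void *element, void *udata)
{
    job *j = (job *)element;
    urgency_state *s = (urgency_state *)udata;

    if (j->deadline == 0) {
        return -1; // due now: pop this element immediately
    }

    if (j->deadline < s->best_deadline) {
        s->best_deadline = j->deadline;
        return -2; // best candidate so far, but keep looking
    }

    return 0; // not interesting
}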