Example #1
File: queue_test.c Project: Unidata/LDM
static void test_q_dequeue(void)
{
    Queue* q = q_new();
    int first;
    int status = q_enqueue(q, &first);
    CU_ASSERT_EQUAL(status, 0);
    int second;
    status = q_enqueue(q, &second);
    CU_ASSERT_EQUAL(status, 0);
    CU_ASSERT_PTR_EQUAL(q_dequeue(q), &first);
    CU_ASSERT_EQUAL(q_size(q), 1);
    CU_ASSERT_PTR_EQUAL(q_dequeue(q), &second);
    CU_ASSERT_EQUAL(q_size(q), 0);
    q_free(q);
}
Example #2
int check_queue()
{
  QUEUE *queue;
  char *item;

  size_t counter;

  queue = q_init();
  if(!queue)
    {
      fprintf(stderr, "unable to initialize queue\n");
      return 0;
    }

  for(counter = 0; datas[counter]; counter ++)
    q_enqueue(queue, datas[counter], strlen(datas[counter]) + 1);

  item = (char *)q_front(queue);
  if(!item)
    {
      fprintf(stderr, "got NULL when expecting %s\n", datas[counter]);
      return 1;
    }
  if(strcmp(item, datas[0]))
    {
      fprintf(stderr, "q_front() returned %s, expecting %s\n", item, datas[0]);
      return 2;
    }

  for(counter = 0; datas[counter]; counter ++)
    {
      item = (char *)q_dequeue(queue);
      if(!item || strcmp(item, datas[counter]))
	{
	  fprintf(stderr, "got %s, expecting %s\n", item, datas[counter]);
	  return 3;
	}
      free(item);
    }
  
  item = (char *)q_dequeue(queue);
  if(item)
    {
      fprintf(stderr, "got %s when expecting NULL\n", item);
      return 4;
    }

  q_free(queue, QUEUE_NODEALLOC);

  return 0;
}
Example #3
START_TEST (multiple_items) {
  Job job1, job2;
  Queue *q = new_queue();
  assert_not_null(q);
  q_enqueue(q, &job1);
  q_enqueue(q, &job2);
  assert_false(q_empty(q));
  
  Job *d_job1 = q_dequeue(q);
  Job *d_job2 = q_dequeue(q);
  assert_equal(&job1, d_job1);
  assert_equal(&job2, d_job2);
  free_queue(q);
} END_TEST
Example #4
/*
 * Service the interpolator (get the updated encoder counts, etc.)
 */
void
interpolator_service(void)
{
	if (q_head != NULL) {
	    uint32_t timedelta;
	    uint16_t delta;
	    switch (state) {
	    case STATE_OUT_OF_ENDZONE:
	        delta = abs(interpolator_get_absolute_target_position() - interpolator_get_current_position());
	        if (delta < CLOSE_ENOUGH_DEGREES) {
	            time_entered_end_zone = g_timers_state->ms_ticks;
	            state = STATE_IN_ENDZONE;
	        }
	        break;
	    case STATE_IN_ENDZONE:
	        timedelta = g_timers_state->ms_ticks - time_entered_end_zone;
//            LOG("%u, %u, %u\r\n", g_timers_state->ms_ticks, time_entered_end_zone, timedelta);
	        if (timedelta > ENDZONE_MS) {
	            state = STATE_OUT_OF_ENDZONE;
	            q_dequeue();
	        }
	        break;
	    }
	}
}
Example #5
int env_setenv(const char *name,char *value,int flags)
{
    cfe_envvar_t *env;
    int namelen;

    env = env_findenv(name);
    if (env) {
	if (!(flags & ENV_FLG_ADMIN)) {
	    if (env->flags & ENV_FLG_READONLY) return CFE_ERR_ENVREADONLY;
	    }
	q_dequeue((queue_t *) env);
	KFREE(env);
	}

    namelen = strlen(name);

    env = KMALLOC(sizeof(cfe_envvar_t) + namelen + 1 + strlen(value) + 1,0);
    if (!env) return CFE_ERR_NOMEM;

    env->name = (char *) (env+1);
    env->value = env->name + namelen + 1;
    env->flags = (flags & ENV_FLG_MASK);

    strcpy(env->name,name);
    strcpy(env->value,value);

    q_enqueue(&env_envvars,(queue_t *) env);

    return 0;
}
Example #6
File: queue_test.c Project: Unidata/LDM
static void test_q_new(void)
{
    Queue* q = q_new();
    CU_ASSERT_PTR_NOT_NULL_FATAL(q);
    CU_ASSERT_PTR_NULL(q_dequeue(q));
    CU_ASSERT_EQUAL(q_size(q), 0);
    q_free(q);
}
Example #7
File: uart.c Project: Qub3k/robotic_arm
void uart_receive(char *data){
  unsigned int i = 0;
  
  /* Read the data from the queue and save it to the "data" variable */
  while(RxQ.Size != 0 && i < strlen(data)) {
    data[i] = q_dequeue(&RxQ);
    i++;
  }
}
Example #8
START_TEST (single_item) {
  Job job;
  Queue *q = new_queue();
  assert_not_null(q);
  q_enqueue(q, &job);
  Job *d_job = q_dequeue(q);
  assert_equal(&job, d_job);
  free_queue(q);
} END_TEST
Example #9
static void cmd_eat_leading_white(queue_t *head)
{
    ui_token_t *t;

    while (!q_isempty(head)) {
	t = (ui_token_t *) q_getfirst(head);
	if (is_white_space(t)) {
	    q_dequeue(&(t->qb));
	    KFREE(t);
	    }
	else break;
	}
}
Example #10
int env_delenv(const char *name)
{
    cfe_envvar_t *env;

    env = env_findenv(name);

    if (!env) return 0;

    if (!(env->flags & ENV_FLG_READONLY)) {
	q_dequeue((queue_t *) env);
	KFREE(env);
	return 0;
	}

    return CFE_ERR_ENVNOTFOUND;
}
Example #11
void sequence(chatmessage_t* message, packet_t* newpacket)
{
  message->seqnum = atoi(newpacket->packetbody);
  remove_elem(UNSEQ_CHAT_MSGS,(void*)message);
  q_enqueue(HBACK_Q,(void*)message);
  chatmessage_t* firstmessage = (chatmessage_t*)q_peek(HBACK_Q);
  pthread_mutex_lock(&seqno_mutex);
  if(firstmessage->messagetype == JOIN && SEQ_NO == -1) //my first message to display!
  {
    SEQ_NO = firstmessage->seqnum;
  }
  if(firstmessage->seqnum > SEQ_NO)
  {
    printf("SEQUENCE OUT OF SYNC. Skipping Ahead by %d messages\n",firstmessage->seqnum-SEQ_NO); 
   SEQ_NO = firstmessage->seqnum;
  }
  if(firstmessage->seqnum <= SEQ_NO)
  {
    SEQ_NO = firstmessage->seqnum + 1;
    client_t* firstclientmatchbyname;
    if(firstmessage->messagetype == CHAT)
    {
      //      printf("\E[34m%s\E(B\E[m (sequenced: %d):\t%s\n", firstmessage->sender, firstmessage->seqnum,firstmessage->messagebody);
      firstclientmatchbyname = find_client_by_uid(firstmessage->senderuid);
    }
    else
    {
      //      printf("\E[34m%s\E(B\E[m joined the chat (sequenced: %d)\n", firstmessage->messagebody, firstmessage->seqnum);
      firstclientmatchbyname = find_client_by_uid(firstmessage->senderuid);
    }

    char* uid = "";
	    
    if(firstclientmatchbyname != NULL)
    {
      uid = firstclientmatchbyname->uid;
      remove_elem(UNSEQ_CHAT_MSGS,firstmessage);
    }
    
    if(firstmessage->messagetype == CHAT)
      print_msg_with_senderids(firstmessage->sender,firstmessage->messagebody, uid);
    q_dequeue(HBACK_Q);
  }
  pthread_mutex_unlock(&seqno_mutex);

  return;
}
Example #12
File: test.c Project: chengw1005/alg_ex
void test_q()
{
    printf("\ntesting queue\n");

    int capacity = 128;
    struct qnode* q = q_create(capacity);
    
    int num = 200;
    for (int i = 0; i < num; ++i) {
        q_enqueue(q, i);
    }

    while (!q_empty(q)) {
        printf("%d ", q_dequeue(q));
    }

    q_destroy(q);
}
Example #13
/* Serves the customer who is currently using the server
 *
 * PRE: !q_is_empty(q) ("there is at least one customer in the server")
 *	*tsal == (absolute) time at which the current customer leaves the server
 *
 * busy = serve_customer (q, &tsal, &wtime)
 *
 * POS: *wtime == total time that the customer who has just been
 *		  served spent in the system
 *
 *	 busy => *tsal == (absolute) time at which the next customer
 *			  leaves the server
 *
 *	!busy => the server has run out of customers
 *
 */
static bool serve_customer (queue_t q, double *tsal, double *wtime)
{
	bool busy = true;
	
	assert (!q_is_empty(q));
	
	/* Remove the customer from the server and record the total time
	 * it spent inside the system */
	*wtime = *tsal - q_dequeue (q);
	
	if (! q_is_empty(q) )
		/* Generate an (absolute) departure time from the server for
		 * the next customer to be served */
		*tsal = *tsal + gen_exp (Ts);
	else
		busy = false;
	
	return busy;
}
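
To make the postcondition concrete, here is a minimal worked call (hypothetical values; it assumes the queue stores each customer's absolute arrival time, which is what the *tsal - q_dequeue(q) subtraction implies, and that a matching q_enqueue exists in the same module):

/* Hypothetical driver snippet: the customer at the head of q arrived at t = 3.0
 * and the server finishes with it at t = 7.5, so it spent 4.5 time units in the
 * system. q_enqueue() is an assumed counterpart of q_dequeue(). */
double tsal = 7.5, wtime = 0.0;
q_enqueue(q, 3.0);
bool busy = serve_customer(q, &tsal, &wtime);
/* Now wtime == 4.5; if another customer was still queued, busy == true and
 * tsal has been advanced by gen_exp(Ts) to that customer's departure time. */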
Example #14
File: uart.c Project: Qub3k/robotic_arm
void UART0_IRQHandler(void) {
  NVIC_ClearPendingIRQ(UART0_IRQn);
  
  /* Transmitter part */
  if(UART0->S1 & UART_S1_TDRE_MASK) {
    if(!q_empty(&TxQ)){ // there is something to transmit
      UART0->D = q_dequeue(&TxQ);
    }else{ // there is nothing to transmit
      UART0->C2 &= ~UART_C2_TIE_MASK; // clear the interrupt flag
    }
  }
  
  /* Receiver part */
  if(UART0->S1 & UART_S1_RDRF_MASK) {
    if(!q_full(&RxQ)){ // there is still space to store something
      q_enqueue(&RxQ, UART0->D);
    }else{ // error - receiver queue full
      while(1);
    }
  }
}
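
The TxQ drained by the handler above has to be filled somewhere, and the transmit interrupt it clears has to be re-enabled. A minimal sketch of that counterpart, assuming a hypothetical uart_send() and the same q_enqueue()/q_full() helpers the handler uses:

/* Hypothetical transmit routine (not from the project): enqueue the bytes,
 * then enable the TX interrupt so UART0_IRQHandler() starts draining TxQ. */
void uart_send(const char *data, unsigned int len) {
  unsigned int i;
  for(i = 0; i < len; i++) {
    while(q_full(&TxQ))
      ; // wait for room in the software FIFO
    q_enqueue(&TxQ, data[i]);
  }
  UART0->C2 |= UART_C2_TIE_MASK; // the ISR clears this flag again once TxQ is empty
}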
Example #15
int moore(int source) {
    // distance between source vertex and current vertex
    int* dist;
    queue q;
    bool* isInQueue;
    long long thisLoopCount = 0, thisUpdateCount = 0;
    // Initialize
    dist =(int *) malloc((N+1) * sizeof(int));
    isInQueue =(bool *) malloc((N+1) * sizeof(bool));
    for(int i = 1; i <= N; i++) dist[i] = INF;
    for(int i = 1; i <= N; i++) isInQueue[i] = false;
    q_init(&q);
    dist[source] = 0;
    isInQueue[source] = true;
    q_enqueue(source, &q);
    // Loop over entries in queue
//  #pragma omp parallel shared(dist, adj_listhead, q)
//  #pragma omp single
    while(!q_isEmpty(&q)) {
        int vi;
//    #pragma omp critical(queue)
        vi = q_dequeue(&q);
        isInQueue[vi] = false;
//    #pragma omp task
        {
            process(vi, dist, &q, isInQueue, &thisLoopCount, &thisUpdateCount);
        }
    } // All done
    // implicit barrier
    // all tasks should be finished below this line
    if(DEBUG) {
        printf("source = %d, ", source);
        printf("%d %d %d", dist[1], dist[N-1], dist[N]);
        printf("\n");
    }
    free(dist);
    free(isInQueue);
    loopCount[omp_get_thread_num()] += thisLoopCount;
    updateCount[omp_get_thread_num()] += thisUpdateCount;
    // declared int: return 0 as a status; the counters live in the globals above
    return 0;
}
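
The per-vertex relaxation happens in process(), which the snippet does not show. A sketch of what that step typically looks like in Moore's algorithm, assuming a hypothetical adjacency-list node layout (only dist, isInQueue, q_enqueue and the two counters come from the code above):

/* Hypothetical relaxation step: struct edge and adj_listhead[] are assumptions. */
struct edge { int to; int weight; struct edge *next; };
extern struct edge *adj_listhead[];

void process(int vi, int *dist, queue *q, bool *isInQueue,
             long long *loops, long long *updates) {
    for (struct edge *e = adj_listhead[vi]; e != NULL; e = e->next) {
        ++*loops;
        if (dist[vi] + e->weight < dist[e->to]) { // found a shorter path via vi
            dist[e->to] = dist[vi] + e->weight;
            ++*updates;
            if (!isInQueue[e->to]) {              // re-queue the improved vertex
                isInQueue[e->to] = true;
                q_enqueue(e->to, q);
            }
        }
    }
}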
Example #16
/** Dequeue a Job from the queue.
 *
 *  If the queue is empty, waits until a Job has been added to the
 *  queue; times out after the given number of seconds.
 *
 *  This was helped by http://www.yolinux.com/TUTORIALS/LinuxTutorialPosixThreads.html#BASICS
 */
void * q_dequeue_or_wait(Queue * q, int seconds) {
  void *job = NULL;  
  struct timeval tv;
  struct timespec ts;
  gettimeofday(&tv,NULL);
  
  /* Convert from timeval to timespec */
  ts.tv_sec  = tv.tv_sec;
  ts.tv_nsec = tv.tv_usec * 1000;
  ts.tv_sec += seconds;
      
  /* The algorithm here is: first check whether there is a job in the queue.
   *  - If there is no job, wait until a job is added.
   *   - When a job is added the thread is woken, and we then try to
   *     take the job off the queue.
   *  - Keep doing this until we get a job (or the wait times out).
   */
  while (NULL == job) {
    trace("getting cond lock: %i", pthread_self());
    pthread_mutex_lock(&(q->wait_condition_mutex));
    if (NULL == q->front) {
      trace("pthread_cond_timedwait: %i", pthread_self());
      int rc = pthread_cond_timedwait(&(q->wait_condition), &(q->wait_condition_mutex), &ts);
      trace("queue wait return: %i", pthread_self());
      if (ETIMEDOUT == rc) {        
        trace("queue wait time out: %i", pthread_self());
        pthread_mutex_unlock(&(q->wait_condition_mutex));
        break;
      }
    }
    pthread_mutex_unlock(&(q->wait_condition_mutex));
    
    job = q_dequeue(q);
  }
  //debug("returning %x as job", job);
  return job;
}
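
The timed wait above only returns early if a producer signals q->wait_condition after adding a job. A minimal producer-side sketch, assuming a hypothetical helper and a q_enqueue(Queue *, void *) with the shape used in the Job-based examples above:

/* Hypothetical producer counterpart (not from the project): enqueue the job,
 * then wake one thread blocked in q_dequeue_or_wait(). */
void q_enqueue_and_signal(Queue *q, void *job) {
  q_enqueue(q, job);
  pthread_mutex_lock(&(q->wait_condition_mutex));
  pthread_cond_signal(&(q->wait_condition));
  pthread_mutex_unlock(&(q->wait_condition_mutex));
}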
Example #17
static void
DFLOWworker(void *T)
{
	struct worker *t = (struct worker *) T;
	DataFlow flow;
	FlowEvent fe = 0, fnxt = 0;
	int id = (int) (t - workers);
	Thread thr;
	str error = 0;
	int i,last;
	Client cntxt;
	InstrPtr p;

	thr = THRnew("DFLOWworker");

	GDKsetbuf(GDKmalloc(GDKMAXERRLEN)); /* where to leave errors */
	GDKerrbuf[0] = 0;
	MT_lock_set(&dataflowLock, "DFLOWworker");
	cntxt = t->cntxt;
	MT_lock_unset(&dataflowLock, "DFLOWworker");
	if (cntxt) {
		/* wait until we are allowed to start working */
		MT_sema_down(&t->s, "DFLOWworker");
	}
	while (1) {
		if (fnxt == 0) {
			MT_lock_set(&dataflowLock, "DFLOWworker");
			cntxt = t->cntxt;
			MT_lock_unset(&dataflowLock, "DFLOWworker");
			fe = q_dequeue(todo, cntxt);
			if (fe == NULL) {
				if (cntxt) {
					/* we're not done yet with work for the current
					 * client (as far as we know), so give up the CPU
					 * and let the scheduler enter some more work, but
					 * first compensate for the down we did in
					 * dequeue */
					MT_sema_up(&todo->s, "DFLOWworker");
					MT_sleep_ms(1);
					continue;
				}
				/* no more work to be done: exit */
				break;
			}
		} else
			fe = fnxt;
		if (ATOMIC_GET(exiting, exitingLock, "DFLOWworker")) {
			break;
		}
		fnxt = 0;
		assert(fe);
		flow = fe->flow;
		assert(flow);

		/* whenever we have a (concurrent) error, skip it */
		if (flow->error) {
			q_enqueue(flow->done, fe);
			continue;
		}

		/* skip all instructions when we have encountered an error */
		if (flow->error == 0) {
#ifdef USE_MAL_ADMISSION
			if (MALadmission(fe->argclaim, fe->hotclaim)) {
				fe->hotclaim = 0;   /* don't assume priority anymore */
				if (todo->last == 0)
					MT_sleep_ms(DELAYUNIT);
				q_requeue(todo, fe);
				continue;
			}
#endif
			error = runMALsequence(flow->cntxt, flow->mb, fe->pc, fe->pc + 1, flow->stk, 0, 0);
			PARDEBUG fprintf(stderr, "#executed pc= %d wrk= %d claim= " LLFMT "," LLFMT " %s\n",
							 fe->pc, id, fe->argclaim, fe->hotclaim, error ? error : "");
#ifdef USE_MAL_ADMISSION
			/* release the memory claim */
			MALadmission(-fe->argclaim, -fe->hotclaim);
#endif
			/* update the numa information. keep the thread-id producing the value */
			p= getInstrPtr(flow->mb,fe->pc);
			for( i = 0; i < p->argc; i++)
				flow->mb->var[getArg(p,i)]->worker = thr->tid;

			MT_lock_set(&flow->flowlock, "DFLOWworker");
			fe->state = DFLOWwrapup;
			MT_lock_unset(&flow->flowlock, "DFLOWworker");
			if (error) {
				MT_lock_set(&flow->flowlock, "DFLOWworker");
				/* only collect one error (from one thread, needed for stable testing) */
				if (!flow->error)
					flow->error = error;
				MT_lock_unset(&flow->flowlock, "DFLOWworker");
				/* after an error we skip the rest of the block */
				q_enqueue(flow->done, fe);
				continue;
			}
		}

		/* see if you can find an eligible instruction that uses the
		 * result just produced. Then we can continue with it right away.
		 * We are just looking forward for the last block, which means we
		 * are safe from concurrent actions. No other thread can steal it,
		 * because we hold the logical lock.
		 * All eligible instructions are queued
		 */
#ifdef USE_MAL_ADMISSION
		{
		InstrPtr p = getInstrPtr(flow->mb, fe->pc);
		assert(p);
		fe->hotclaim = 0;
		for (i = 0; i < p->retc; i++)
			fe->hotclaim += getMemoryClaim(flow->mb, flow->stk, p, i, FALSE);
		}
#endif
		MT_lock_set(&flow->flowlock, "DFLOWworker");

		for (last = fe->pc - flow->start; last >= 0 && (i = flow->nodes[last]) > 0; last = flow->edges[last])
			if (flow->status[i].state == DFLOWpending &&
				flow->status[i].blocks == 1) {
				flow->status[i].state = DFLOWrunning;
				flow->status[i].blocks = 0;
				flow->status[i].hotclaim = fe->hotclaim;
				flow->status[i].argclaim += fe->hotclaim;
				fnxt = flow->status + i;
				break;
			}
		MT_lock_unset(&flow->flowlock, "DFLOWworker");

		q_enqueue(flow->done, fe);
		if ( fnxt == 0) {
			int last;
			MT_lock_set(&todo->l, "DFLOWworker");
			last = todo->last;
			MT_lock_unset(&todo->l, "DFLOWworker");
			if (last == 0)
				profilerHeartbeatEvent("wait", 0);
		}
	}
	GDKfree(GDKerrbuf);
	GDKsetbuf(0);
	THRdel(thr);
	MT_lock_set(&dataflowLock, "DFLOWworker");
	t->flag = EXITED;
	MT_lock_unset(&dataflowLock, "DFLOWworker");
}
Example #18
File: dequeue.c Project: wlan0/Crumbler
/*! \fn void* x_dequeue(struct Dequeue *queue)
 *  \brief deletes the oldest element from queue and returns it<br/>
 *  <b> Precondition : The queue should have been initialized</b> <br/>
 *  <b> Postcondition : The oldest element is returned. If no element was present, then NULL is returned</b> <br/>
 *  \param queue The queue which is being dequeued
 *  \return a pointer to the data. IMPORTANT - The programmer is responsible for typecasting the data to the appropriate type before using it.
 */
void* x_dequeue(struct Dequeue *queue)
{
	void *ptr = NULL;
	ptr = q_dequeue(queue->queue,ptr,queue->size_of_data);
	return ptr;
}
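
As the comment notes, the caller gets back a void pointer and must cast it. A minimal usage sketch (hypothetical caller: my_queue and the int payload are assumptions; only x_dequeue() above is from the project):

/* Hypothetical caller: cast the result back to the stored type and
 * check for NULL, which signals an empty queue. */
int *value = (int *) x_dequeue(&my_queue);
if (value != NULL)
	printf("dequeued %d\n", *value);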
Example #19
static str
DFLOWscheduler(DataFlow flow, struct worker *w)
{
	int last;
	int i;
#ifdef USE_MAL_ADMISSION
	int j;
	InstrPtr p;
#endif
	int tasks=0, actions;
	str ret = MAL_SUCCEED;
	FlowEvent fe, f = 0;

	if (flow == NULL)
		throw(MAL, "dataflow", "DFLOWscheduler(): Called with flow == NULL");
	actions = flow->stop - flow->start;
	if (actions == 0)
		throw(MAL, "dataflow", "Empty dataflow block");
	/* initialize the eligible statements */
	fe = flow->status;

	MT_lock_set(&flow->flowlock, "DFLOWscheduler");
	for (i = 0; i < actions; i++)
		if (fe[i].blocks == 0) {
#ifdef USE_MAL_ADMISSION
			p = getInstrPtr(flow->mb,fe[i].pc);
			if (p == NULL) {
				MT_lock_unset(&flow->flowlock, "DFLOWscheduler");
				throw(MAL, "dataflow", "DFLOWscheduler(): getInstrPtr(flow->mb,fe[i].pc) returned NULL");
			}
			for (j = p->retc; j < p->argc; j++)
				fe[i].argclaim = getMemoryClaim(fe[0].flow->mb, fe[0].flow->stk, p, j, FALSE);
#endif
			q_enqueue(todo, flow->status + i);
			flow->status[i].state = DFLOWrunning;
			PARDEBUG fprintf(stderr, "#enqueue pc=%d claim=" LLFMT "\n", flow->status[i].pc, flow->status[i].argclaim);
		}
	MT_lock_unset(&flow->flowlock, "DFLOWscheduler");
	MT_sema_up(&w->s, "DFLOWscheduler");

	PARDEBUG fprintf(stderr, "#run %d instructions in dataflow block\n", actions);

	while (actions != tasks ) {
		f = q_dequeue(flow->done, NULL);
		if (ATOMIC_GET(exiting, exitingLock, "DFLOWscheduler"))
			break;
		if (f == NULL)
			throw(MAL, "dataflow", "DFLOWscheduler(): q_dequeue(flow->done) returned NULL");

		/*
		 * When an instruction is finished we have to reduce the blocked
		 * counter for all dependent instructions.  For those where it
		 * drops to zero we can schedule it; we do that here instead of
		 * in the scheduler.
		 */

		MT_lock_set(&flow->flowlock, "DFLOWscheduler");
		tasks++;
		for (last = f->pc - flow->start; last >= 0 && (i = flow->nodes[last]) > 0; last = flow->edges[last])
			if (flow->status[i].state == DFLOWpending) {
				flow->status[i].argclaim += f->hotclaim;
				if (flow->status[i].blocks == 1 ) {
					flow->status[i].state = DFLOWrunning;
					flow->status[i].blocks--;
					q_enqueue(todo, flow->status + i);
					PARDEBUG fprintf(stderr, "#enqueue pc=%d claim= " LLFMT "\n", flow->status[i].pc, flow->status[i].argclaim);
				} else {
					flow->status[i].blocks--;
				}
			}
		MT_lock_unset(&flow->flowlock, "DFLOWscheduler");
	}
	/* release the worker from its specific task (turn it into a
	 * generic worker) */
	MT_lock_set(&dataflowLock, "DFLOWscheduler");
	w->cntxt = NULL;
	MT_lock_unset(&dataflowLock, "DFLOWscheduler");
	/* wrap up errors */
	assert(flow->done->last == 0);
	if (flow->error ) {
		PARDEBUG fprintf(stderr, "#errors encountered %s ", flow->error ? flow->error : "unknown");
		ret = flow->error;
	}
	return ret;
}