Example #1
void rosout_post(UrosString *strp, uros_bool_t costant, uint8_t level,
                 const char *fileszp, int line, const char *funcp) {

  static uint32_t seq = 0;

  struct msg__rosgraph_msgs__Log *msgp;

  urosAssert(urosStringIsValid(strp));

  msgp = urosNew(NULL, struct msg__rosgraph_msgs__Log);
  urosAssert(msgp != NULL);
  init_msg__rosgraph_msgs__Log(msgp);

  msgp->header.frame_id = urosStringAssignZ(costant ? "1" : "0");
  msgp->header.seq = seq++;
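  /* urosGetTimestampMsec() reports a millisecond timestamp; split it into ROS sec/nsec fields */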
  msgp->header.stamp.sec = urosGetTimestampMsec();
  msgp->header.stamp.nsec = (msgp->header.stamp.sec % 1000) * 1000000;
  msgp->header.stamp.sec /= 1000;
  msgp->level = level;
  msgp->name = urosNode.config.nodeName;
  msgp->msg = *strp;
  msgp->file = urosStringAssignZ(fileszp);
  msgp->function = urosStringAssignZ(funcp);
  msgp->line = line;

  fifo_enqueue(&rosoutQueue, (void *)msgp);
}
Example #2
File: scatter_gather.c Project: 8l/beri
static void process()
{
    while (end_scatter_gather == 0)
    {
        unsigned int tmp_token;
        if(fifo_dequeue_non_blocking(&scatter_to_process, &tmp_token))
        {
            fifo_enqueue(&process_to_gather, tmp_token);
            // display critical section //
            uart_lock_acquire();
            uart_puts("process on core ");
            uart_putd(core_id());
            uart_puts(" - processing token ");
            uart_putd(tmp_token);
            uart_putc('\n');
            uart_lock_release();
            // display critical section //
        }
    }
    // display critical section //
    uart_lock_acquire();
    uart_puts("process on core ");
    uart_putd(core_id());
    uart_puts(" - END\n");
    uart_lock_release();
    // display critical section //
}
Example #3
File: scatter_gather.c Project: 8l/beri
static void scatter()
{
    unsigned long int nb_token = 0;
    while (nb_token < MAX_TOKEN)
    {
        // display critical section //
        uart_lock_acquire();
        uart_puts("scatter on core ");
        uart_putd(core_id());
        uart_puts(" - generating token ");
        uart_putd(nb_token);
        uart_putc('\n');
        uart_lock_release();
        // display critical section //
        fifo_enqueue(&scatter_to_process, nb_token);
        nb_token++;
    }
    // display critical section //
    uart_lock_acquire();
    uart_puts("scatter on core ");
    uart_putd(core_id());
    uart_puts(" - END\n");
    uart_lock_release();
    // display critical section //
}
Example #4
/** output one character */
void uart_putc(uint8_t data)
{
    /* store data */
    fifo_enqueue((fifo_t *)&global_uart.tx, data);

    /* enable interrupt */
    _UCSRB_UART0 |= _BV(_UDRIE_UART0);
}
Example #5
static int ilo_pkt_enqueue(struct ilo_hwinfo *hw, struct ccb *ccb,
			   int dir, int id, int len)
{
	char *fifobar;
	int entry;

	if (dir == SENDQ)
		fifobar = ccb->ccb_u1.send_fifobar;
	else
		fifobar = ccb->ccb_u3.recv_fifobar;

	entry = mk_entry(id, len);
	return fifo_enqueue(hw, fifobar, entry);
}
Example #6
/*
 * This function is called as a result of the "eth_drv_recv()" call above.
 * Its job is to fetch data for a packet from the hardware once
 * memory buffers have been allocated for the packet. Note that the buffers
 * may come in pieces, using a scatter-gather list. This allows for more
 * efficient processing in the upper layers of the stack.
 */
static void dp83902a_recv(struct nic_priv_data *dp, int len)
{
	u8 *base = dp->base;
	int i, mlen;
	u8 saved_char = 0;
	bool saved;

	/* Read incoming packet data */
	DP_OUT(base, DP_CR, DP_CR_PAGE0 | DP_CR_NODMA | DP_CR_START);
	DP_OUT(base, DP_RBCL, len & 0xFF);
	DP_OUT(base, DP_RBCH, len >> 8);
	DP_OUT(base, DP_RSAL, 4);		/* Past header */
	DP_OUT(base, DP_RSAH, dp->rx_next);
	DP_OUT(base, DP_ISR, DP_ISR_RDC); /* Clear end of DMA */
	DP_OUT(base, DP_CR, DP_CR_RDMA | DP_CR_START);

	saved = false;
	for (i = 0; i < 1; i++) {
		mlen = len;
		while (0 < mlen) {
			/* Saved byte from previous loop? */
			if (saved) {
				fifo_enqueue(dp->rx_rb, &saved_char, TRUE);
				mlen--;
				saved = false;
				continue;
			}
			
			{
				u8 tmp;
				DP_IN_DATA(dp->data, tmp);
				fifo_enqueue(dp->rx_rb, &tmp, TRUE);
				mlen--;
			}
		}
	}
}
Example #7
struct mempool *mempool_ram_create(u32 entity_size,
				   u32 page_count,
				   u32 mem_flags)
{
	u32 e;
	virtual_addr_t va;
	struct mempool *mp;

	if (!entity_size ||
	    ((VMM_PAGE_SIZE * page_count) < entity_size)) {
		return NULL;
	}

	mp = vmm_zalloc(sizeof(struct mempool));
	if (!mp) {
		return NULL;
	}

	mp->type = MEMPOOL_TYPE_RAM;
	mp->entity_size = entity_size;
	mp->entity_count =
		udiv64((VMM_PAGE_SIZE * page_count), entity_size);

	mp->f = fifo_alloc(sizeof(virtual_addr_t), mp->entity_count);
	if (!mp->f) {
		vmm_free(mp);
		return NULL;
	}

	mp->entity_base = vmm_host_alloc_pages(page_count, mem_flags);
	if (!mp->entity_base) {
		fifo_free(mp->f);
		vmm_free(mp);
		return NULL;
	}
	mp->d.ram.page_count = page_count;
	mp->d.ram.mem_flags = mem_flags;

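	/* pre-populate the free-entity FIFO with the address of every entity */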
	for (e = 0; e < mp->entity_count; e++) {
		va = mp->entity_base + e * entity_size;
		fifo_enqueue(mp->f, &va, FALSE);
	}

	return mp;
}
Example #8
struct mempool *mempool_raw_create(u32 entity_size,
				   physical_addr_t phys,
				   virtual_size_t size,
				   u32 mem_flags)
{
	u32 e;
	virtual_addr_t va;
	struct mempool *mp;

	if (!entity_size || (size < entity_size)) {
		return NULL;
	}

	mp = vmm_zalloc(sizeof(struct mempool));
	if (!mp) {
		return NULL;
	}

	mp->type = MEMPOOL_TYPE_RAW;
	mp->entity_size = entity_size;
	mp->entity_count = udiv64(size, entity_size);

	mp->f = fifo_alloc(sizeof(virtual_addr_t), mp->entity_count);
	if (!mp->f) {
		vmm_free(mp);
		return NULL;
	}

	mp->entity_base = vmm_host_memmap(phys, size, mem_flags);
	if (!mp->entity_base) {
		fifo_free(mp->f);
		vmm_free(mp);
		return NULL;
	}
	mp->d.raw.phys = phys;
	mp->d.raw.size = size;
	mp->d.raw.mem_flags = mem_flags;

	for (e = 0; e < mp->entity_count; e++) {
		va = mp->entity_base + e * entity_size;
		fifo_enqueue(mp->f, &va, FALSE);
	}

	return mp;
}
Example #9
int mempool_free(struct mempool *mp, void *entity)
{
	virtual_addr_t entity_va;

	if (!mp) {
		return VMM_EFAIL;
	}

	if (!mempool_check_ptr(mp, entity)) {
		return VMM_EINVALID;
	}

	entity_va = (virtual_addr_t)entity;
	if (!fifo_enqueue(mp->f, &entity_va, FALSE)) {
		return VMM_ENOSPC;
	}

	return VMM_OK;
}
Example #10
int main( int argc, char **argv ) {
	fifo_queue_t q;
	fifo_init(&q);

	#pragma omp parallel
	#pragma omp single nowait
	{
		int i;
		for(i=1;i<5;++i) {
			#pragma omp task
			{
				int j;
				for(j = 0; j < 1000; ++j) {
					fifo_enqueue(&q, i*1000+j);
				}
			}
			#pragma omp task
			{
				int d, j;
				for(j = 0; j < 1000; ++j) {
					d = fifo_dequeue(&q);
					if (d)
						printf("dequeue %d\n", d);
				}
			}
		}
	}
	
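	/* drain whatever is left in the queue; the loop ends when fifo_dequeue returns -1 */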
	int d;
	while (true) {
		d = fifo_dequeue(&q);
		if (d == -1)
			break;
		printf("dequeue %d\n", d);
	}
	assert(fifo_empty(&q));
	
	#pragma omp taskwait
	
	fifo_free(&q);
	
	return 0;
}
Example #11
struct mempool *mempool_heap_create(u32 entity_size,
				    u32 entity_count)
{
	u32 e;
	virtual_addr_t va;
	struct mempool *mp;

	if (!entity_size || !entity_count) {
		return NULL;
	}

	mp = vmm_zalloc(sizeof(struct mempool));
	if (!mp) {
		return NULL;
	}

	mp->type = MEMPOOL_TYPE_HEAP;
	mp->entity_size = entity_size;
	mp->entity_count = entity_count;

	mp->f = fifo_alloc(sizeof(virtual_addr_t), mp->entity_count);
	if (!mp->f) {
		vmm_free(mp);
		return NULL;
	}

	mp->entity_base =
		(virtual_addr_t)vmm_malloc(entity_size * entity_count);
	if (!mp->entity_base) {
		fifo_free(mp->f);
		vmm_free(mp);
		return NULL;
	}

	for (e = 0; e < mp->entity_count; e++) {
		va = mp->entity_base + e * entity_size;
		fifo_enqueue(mp->f, &va, FALSE);
	}

	return mp;
}
Example #12
File: mempool.c Project: CoryXie/xvisor
int mempool_free(struct mempool *mp, void *buf)
{
	virtual_addr_t buf_va;

	if (!mp) {
		return VMM_EFAIL;
	}

	buf_va = (virtual_addr_t)buf;

	if ((buf_va < mp->page_base) || 
	    ((mp->page_base + (mp->page_count * VMM_PAGE_SIZE)) < buf_va)) {
		return VMM_EINVALID;
	}

	if (!fifo_enqueue(mp->f, &buf_va, FALSE)) {
		return VMM_EFAIL;
	}

	return VMM_OK;
}
Example #13
void *enquer(void *arg)
{
    int i, j;

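    /* two-phase spin barrier: wait until every enqueuer and dequeuer
       thread has arrived before starting the enqueue loop */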
    inc(&b1);
    while (b1!=(NENQER+NDEQER));
    inc(&b2);
    while (b2!=(NENQER+NDEQER));

    for (j=0; j<NITER; j++) {
        i=anf32(&n, 1);
        fifo_enqueue(q, (void *)i);
    }

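    /* exit barrier: decrement the shared counters and spin until they reach zero */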
    dec(&c1);
    while (c1!=0);
    dec(&c2);
    while (c2!=0);

    return NULL;
}
Example #14
int main(int argc, char *argv[])
{
    pthread_t thrs[NENQER+NDEQER];

    int i, j;
    int qlen=FIFO_LEN_DEFAULT;

    if (argc>1) {
        qlen=atoi(argv[1]);
    }

    q=fifo_create(qlen); /* keep the queue small so the enqueuers can saturate it */
# if (CHECK)
    chararr=(char *)calloc(NITER*NENQER, sizeof(char));
# endif

    for (i=0; i<NENQER; i++)
        pthread_create(thrs+i, NULL, enquer, NULL);
    for (; i<NENQER+NDEQER; i++)
        pthread_create(thrs+i, NULL, dequer, NULL);

    for (i=0; i<NENQER; i++)
        pthread_join(thrs[i], NULL);

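    /* push one zero-valued sentinel per dequeuer so the dequeuer threads can exit */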
    for (j=0; j<NDEQER; j++)
        fifo_enqueue(q, (void *)0);

    for (; i<NENQER+NDEQER; i++)
        pthread_join(thrs[i], NULL);

# if (CHECK)
    for (j=0; j<NITER*NENQER; j++)
        if (chararr[j]==0) {
            printf("%d hasn't been seen\n", j+1);
        }
# endif
    return 0;
}
Example #15
File: mempool.c Project: CoryXie/xvisor
struct mempool *mempool_create(u32 buf_size, u32 buf_count)
{
	u32 b;
	virtual_addr_t va;
	struct mempool *mp;

	mp = vmm_zalloc(sizeof(struct mempool));
	if (!mp) {
		return NULL;
	}

	mp->f = fifo_alloc(sizeof(virtual_addr_t), buf_count);
	if (!mp->f) {
		vmm_free(mp);
		return NULL;
	}

	mp->buf_count = buf_count;
	mp->buf_size = buf_size;

	mp->page_count = VMM_SIZE_TO_PAGE(buf_size * buf_count);
	mp->page_base = vmm_host_alloc_pages(mp->page_count, 
					     VMM_MEMORY_FLAGS_NORMAL);
	if (!mp->page_base) {
		fifo_free(mp->f);
		vmm_free(mp);
		return NULL;
	}

	for (b = 0; b < mp->buf_count; b++) {
		va = mp->page_base + b * buf_size;
		fifo_enqueue(mp->f, &va, FALSE);
	}

	return mp;
}
Example #16
File: opnormt.c Project: jjgreen/opnorm
static void* worker(tdat_t *td)
{

#ifdef HAVE_SIGNAL_H

  /* set this thread as cancellable */

  if (set_cancellable() != 0)
    {
      td->status = OPNORM_THREAD;
      return NULL;
    }

#endif

  /*
    unpack arguments, mostly for readability but also saves
    a few dereferences
  */

  tdat_shared_t *ts = td->shared;

  index_t
    n = ts->n,
    m = ts->m;

  double
    p    = ts->p,
    q    = ts->q,
    eps  = ts->eps,
    LCRP = ts->LCRP,
    SCD  = ts->SCD;

  const double *M = ts->M;

  fifo_t *fifo = ts->fifo;

  /* working data */

  double  pcent[n];
  cube_t  cube0, cube1;
  patch_t patch;
  double  tmax = 0.0;

  patch.centres = pcent;

  int fifoerr;

  while (1)
    {
      /* thread cancellation point  */

      pthread_testcancel();

      /* dequeue a cube */

      if (pthread_mutex_lock(&(ts->fifolock)) < 0)
	{
	  td->status = OPNORM_THREAD;
	  return NULL;
	}

      fifoerr = fifo_dequeue(fifo, &cube0);

      if (pthread_mutex_unlock(&(ts->fifolock)) < 0)
	{
	  td->status = OPNORM_THREAD;
	  return NULL;
	}

      if (fifoerr != FIFO_OK)
	{
	  td->status = (fifoerr == FIFO_EMPTY ?
			OPNORM_OK :
			OPNORM_FIFO);
	  return NULL;
	}

      cube_print(&cube0, n, ">");

      /* nfifo is the total number dequeued */

      td->nfifo++;

      /* cube subdivide */

      int hwnexp = cube0.hwnexp + 1;
      double halfwidth = ldexp(1, -hwnexp);

      /*
	if halfwidth < DBL_EPSILON then we cannot
	calculate the centres of the subdivided cubes
	accurately, so we break out and report that the
	requested accuracy could not be achieved
      */

      if (halfwidth < DBL_EPSILON)
	{
	  td->status = OPNORM_INACC;
	  return NULL;
	}

      for (size_t k = 0 ; k < (1UL << (n-1)) ; k++)
	{
	  cube1.side = cube0.side;
	  cube1.hwnexp = hwnexp;

	  /*
	    we give our cube1 a temporary set of centres
	    while we evaluate and decide whether to jettison
	    or enqueue it; only if the latter do we malloc
	    and copy the temporary centres.  This saves a
	    *lot* of malloc/free pairs
	  */

	  double centres[n];
	  cube1.centres = centres;

	  size_t k0 = k;

	  for (size_t j = 0 ; j < n ; j++)
	    {
	      if (cube0.side == j)
		{
		  cube1.centres[j] = cube0.centres[j];
		  continue;
		}

	      cube1.centres[j] = cube0.centres[j] +
		((k0 % 2) ? halfwidth : -halfwidth);

	      k0 /= 2;
	    }

	  cube_print(&cube1, n, "<");

	  /* get the corresponding patch */

	  cube_to_patch(&cube1, n, p, LCRP, SCD, &patch);
	  patch_print(patch, n);

	  double ratio = radius_to_ratio(patch.radius, p);

	  /*
	    check for patch viability - this check almost
	    always succeeds (on Drury K, it is false in
	    80/164016 cases, i.e. 0.05% of the time), so
	    very small beer.  Yet it introduces a branch
	    point, so one might think it worth removing.
	    Timing tests indicate that there is no speedup
	    in doing so, so we keep it.
	  */

	  if (ratio > 0)
	    {
	      /* evaluate M at patch centre */

	      double v[m];

	      ts->matvec(M, pcent, m, n, v);
	      td->neval++;

	      double t = pnorm(v, m, q);

	      /* test first with the previous value of tmax */

	      if (t < tmax)
		{
		  /* test whether we can jettison this cube */

		  if (t < (tmax * ratio * (1 + eps))) continue;

		  /*
		    note that the global ts->tmax >= tmax so it would
		    be pointless (and cost a mutex access) to test
		    for that here
		  */
		}
	      else
		{
		  if (pthread_mutex_lock(&(ts->maxlock)) < 0)
		    {
		      td->status = OPNORM_THREAD;
		      return NULL;
		    }

		  /* update local tmax from global */

		  tmax = ts->tmax;

		  /*
		    if we have found a new global maximum then we
		    update it (and the global maximising vector)
		    as well as the local copy
		  */

		  if (t > tmax)
		    {
		      ts->tmax = tmax = t;

		      if (ts->vmax)
			memcpy(ts->vmax, pcent, n*sizeof(double));
		    }

		  if (pthread_mutex_unlock(&(ts->maxlock)) < 0)
		    {
		      td->status = OPNORM_THREAD;
		      return NULL;
		    }

		  /*
		    test whether we can jettison this cube but
		    now with the updated value of tmax
		  */

		  if (t < (tmax * ratio * (1 + eps))) continue;

		}
	    }

	  /*
	    we will enqueue this cube, so we need to
	    allocate and copy its temporary centres set
	  */

	  if (! (cube1.centres = malloc(n*sizeof(double))))
	    {
	      td->status = OPNORM_ALLOC;
	      return NULL;
	    }

	  memcpy(cube1.centres, centres, n*sizeof(double));

	  if (pthread_mutex_lock(&(ts->fifolock)) < 0)
	    {
	      free(cube1.centres);
	      td->status = OPNORM_THREAD;
	      return NULL;
	    }

	  fifoerr = fifo_enqueue(fifo, &cube1);

	  if (pthread_mutex_unlock(&(ts->fifolock)) < 0)
	    {
	      td->status = OPNORM_THREAD;
	      return NULL;
	    }

	  switch(fifoerr)
	    {
	    case FIFO_OK:
	      break;
	    case FIFO_USERMAX:
	      td->status = OPNORM_FIFOMAX;
	      return NULL;
	    default:
	      td->status = OPNORM_FIFO;
	      return NULL;
	    }
	}

      free(cube0.centres);
    }

  /* we should not arrive here */

  td->status = OPNORM_BUG;

  return NULL;
}