Esempio n. 1
0
/* Apply a GCC-style aligned(n) attribute to an entity: declarations get the
 * alignment directly, compounds (struct/union) keep the strictest alignment
 * requested so far; anything else draws a warning. */
static void handle_attribute_aligned(const attribute_t *attribute,
                                     entity_t *entity)
{
	int alignment = 32; /* TODO: fill in maximum useful alignment for
	                             target machine */
	if (attribute->a.arguments) {
		attribute_argument_t *argument = attribute->a.arguments;
		alignment = fold_expression_to_int(argument->v.expression);
	}

	/* Check for non-positive values before the power-of-2 test: otherwise
	 * aligned(0) or a negative argument gets the misleading "power of 2"
	 * diagnostic, and is_po2() on a negative value may invoke
	 * implementation-defined/undefined shift behavior. */
	if (alignment <= 0) {
		errorf(&attribute->pos, "alignment must be bigger than 0 but is %d", alignment);
		return;
	}
	if (!is_po2(alignment)) {
		errorf(&attribute->pos, "alignment must be a power of 2 but is %d", alignment);
		return;
	}

	switch (entity->kind) {
	case DECLARATION_KIND_CASES:
		entity->declaration.alignment = alignment;
		break;
	case ENTITY_STRUCT:
	case ENTITY_UNION:
		/* Never weaken an alignment already requested on the compound. */
		entity->compound.alignment = MAX(entity->compound.alignment, alignment);
		break;

	default:
		warningf(WARN_OTHER, &attribute->pos, "alignment attribute specification on %N ignored", entity);
		break;
	}
}
Esempio n. 2
0
/* Build a square dither matrix of side size^expt by iterating the base
 * pattern in `array`, then rescale each cell into the 0..65535 range.
 * The caller owns `mat`; the matrix buffer allocated here is owned by
 * `mat` (i_own = 1). */
void
stp_dither_matrix_iterated_init(stp_dither_matrix_impl_t *mat, size_t size,
				size_t expt, const unsigned *array)
{
  size_t i;			/* size_t: expt is size_t; an int counter would
				   trigger a signed/unsigned comparison and
				   misbehave for very large exponents */
  int x, y;
  mat->base = size;
  mat->exp = expt;
  mat->x_size = 1;
  /* x_size = size^expt */
  for (i = 0; i < expt; i++)
    mat->x_size *= mat->base;
  mat->y_size = mat->x_size;
  mat->total_size = mat->x_size * mat->y_size;
  mat->matrix = stp_malloc(sizeof(unsigned) * mat->x_size * mat->y_size);
  for (x = 0; x < mat->x_size; x++)
    for (y = 0; y < mat->y_size; y++)
      {
	mat->matrix[x + y * mat->x_size] =
	  calc_ordered_point(x, y, mat->exp, 1, mat->base, array);
	/* Normalize the ordered point into a 16-bit range. */
	mat->matrix[x + y * mat->x_size] =
	  (double) mat->matrix[x + y * mat->x_size] * 65536.0 /
	  (double) (mat->x_size * mat->y_size);
      }
  mat->last_x = mat->last_x_mod = 0;
  mat->last_y = mat->last_y_mod = 0;
  mat->index = 0;
  mat->i_own = 1;
  /* Power-of-two widths allow masking instead of modulo on lookup. */
  if (is_po2(mat->x_size))
    mat->fast_mask = mat->x_size - 1;
  else
    mat->fast_mask = 0;
}
Esempio n. 3
0
/* Initialize a dither matrix directly from a 16-bit source array.
 * If `transpose` is set the source is read column-major; if `prescaled`
 * is clear each cell is normalized into the 0..65535 range. */
void
stp_dither_matrix_init_short(stp_dither_matrix_impl_t *mat, int x_size, int y_size,
			     const unsigned short *array, int transpose,
			     int prescaled)
{
  int col, row;
  mat->base = x_size;
  mat->exp = 1;
  mat->x_size = x_size;
  mat->y_size = y_size;
  mat->total_size = mat->x_size * mat->y_size;
  mat->matrix = stp_malloc(sizeof(unsigned) * mat->x_size * mat->y_size);
  for (col = 0; col < mat->x_size; col++)
    for (row = 0; row < mat->y_size; row++)
      {
	/* Destination is always row-major; the source index flips when
	   transposing. */
	int dst = col + row * mat->x_size;
	int src = transpose ? (row + col * mat->y_size)
			    : (col + row * mat->x_size);
	mat->matrix[dst] = array[src];
	if (!prescaled)
	  mat->matrix[dst] =
	    (double) mat->matrix[dst] * 65536.0 /
	    (double) (mat->x_size * mat->y_size);
      }
  mat->last_x = mat->last_x_mod = 0;
  mat->last_y = mat->last_y_mod = 0;
  mat->index = 0;
  mat->i_own = 1;
  /* A power-of-two width lets lookups use a mask instead of modulo. */
  if (is_po2(mat->x_size))
    mat->fast_mask = mat->x_size - 1;
  else
    mat->fast_mask = 0;
}
Esempio n. 4
0
/* Allocate a DMA area of at least `size` bytes through the uio-dma driver
 * and mmap it into the process. Returns a malloc'd descriptor the caller
 * must release with the matching free routine, or NULL with errno set on
 * failure. */
struct uio_dma_area *uio_dma_alloc(int fd, unsigned int size,
		unsigned int cache, uint64_t dma_mask, unsigned int memnode)
{
	struct uio_dma_area *da;
	struct uio_dma_alloc_req areq;
	struct uio_dma_free_req freq;
	unsigned int chunk_size;
	int err;

	da = malloc(sizeof(*da));
	if (!da)
		return NULL;

	areq.cache = cache;
	areq.dma_mask = dma_mask;
	areq.memnode  = memnode;
	areq.mmap_offset = 0;
	areq.flags = 0;

	/* Try allocating smallest number of chunks.
	 * We only allocate power of two sized chunks so that we could
	 * avoid division in uio_dma_addr() function. */
	err = 0;
	for (chunk_size = roundup_po2(size); chunk_size; chunk_size >>= 1) {
		areq.chunk_size  = chunk_size;
		areq.chunk_count = (size + chunk_size - 1) / chunk_size;
		err = ioctl(fd, UIO_DMA_ALLOC, (unsigned long) &areq);
		if (!err)
			break;
	}
	if (err) {
		free(da);
		return NULL;
	}

	/* The kernel should only ever hand back power-of-two chunks; a
	 * non-po2 value means the request/response protocol is out of sync.
	 * errno values are positive by convention, so no negation here. */
	if (!is_po2(areq.chunk_size)) {
		errno = EILSEQ;
		goto failed;
	}

	da->size = areq.chunk_size * areq.chunk_count;
	da->chunk_count = areq.chunk_count;
	da->chunk_size  = areq.chunk_size;
	da->mmap_offset = areq.mmap_offset;
	da->addr = mmap(NULL, da->size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, da->mmap_offset);
	if (da->addr == MAP_FAILED)
		goto failed;

	return da;

failed:
	/* We need to free the area we just requested from the kernel.
	 * Preserve errno across the cleanup ioctl/free so the caller sees
	 * the original failure cause. (NOTE(review): freq is only partially
	 * initialized — presumably the driver reads just mmap_offset for
	 * UIO_DMA_FREE; confirm against the kernel side.) */
	err = errno;
	freq.mmap_offset = da->mmap_offset;
	ioctl(fd, UIO_DMA_FREE, (unsigned long) &freq);
	free(da);
	errno = err;
	return NULL;
}
Esempio n. 5
0
/* Lower Alloc/Free nodes in `irg`, installing the new stack alignment,
 * address delta, and constant-size lowering policy as module state. */
void lower_alloc(ir_graph *irg, unsigned new_stack_alignment, bool lower_consts,
                 long new_addr_delta)
{
	/* Validate the alignment actually being installed — checking the
	 * stale module-level stack_alignment from a previous call would
	 * panic spuriously on first use and let a bad new value through. */
	if (!is_po2(new_stack_alignment))
		panic("stack alignment not a power of 2");
	addr_delta           = new_addr_delta;
	stack_alignment      = new_stack_alignment;
	lower_constant_sizes = lower_consts;
	ir_nodeset_init(&transformed);
	irg_walk_graph(irg, lower_alloca_free, NULL, NULL);
	ir_nodeset_destroy(&transformed);
}
Esempio n. 6
0
/* Initialize a dither matrix from an stp_array_t, optionally transposing
 * it. Values are copied as-is (no rescaling). */
void
stp_dither_matrix_init_from_dither_array(stp_dither_matrix_impl_t *mat,
					 const stp_array_t *array,
					 int transpose)
{
  int col, row;
  size_t count;
  const unsigned short *vec;
  int x_size, y_size;
  const stp_sequence_t *seq = stp_array_get_sequence(array);
  stp_array_get_size(array, &x_size, &y_size);

  vec = stp_sequence_get_ushort_data(seq, &count);
  mat->base = x_size;
  mat->exp = 1;
  /* Transposing swaps the matrix dimensions relative to the source. */
  mat->x_size = transpose ? y_size : x_size;
  mat->y_size = transpose ? x_size : y_size;
  mat->total_size = mat->x_size * mat->y_size;
  mat->matrix = stp_malloc(sizeof(unsigned) * mat->x_size * mat->y_size);
  for (col = 0; col < x_size; col++)
    for (row = 0; row < y_size; row++)
      {
	/* Source is always read row-major in (x_size, y_size) space. */
	unsigned short v = vec[col + row * x_size];
	if (transpose)
	  mat->matrix[row + col * y_size] = v;
	else
	  mat->matrix[col + row * x_size] = v;
      }
  mat->last_x = mat->last_x_mod = 0;
  mat->last_y = mat->last_y_mod = 0;
  mat->index = 0;
  mat->i_own = 1;
  /* Power-of-two width enables mask-based wrapping on lookup. */
  if (is_po2(mat->x_size))
    mat->fast_mask = mat->x_size - 1;
  else
    mat->fast_mask = 0;
}