/**
 * Create a pb_buffer whose storage is plain malloc'ed memory.
 * Returns NULL if either the wrapper struct or the data cannot be allocated.
 */
struct pb_buffer *
pb_malloc_buffer_create(pb_size size, const struct pb_desc *desc)
{
   struct malloc_buffer *buf;
   pb_size alignment;

   /* TODO: do a single allocation */
   buf = CALLOC_STRUCT(malloc_buffer);
   if (buf == NULL)
      return NULL;

   pipe_reference_init(&buf->base.base.reference, 1);
   buf->base.base.usage = desc->usage;
   buf->base.base.size = size;
   buf->base.base.alignment = desc->alignment;
   buf->base.vtbl = &malloc_buffer_vtbl;

   /* align_malloc requires at least pointer-sized alignment */
   alignment = desc->alignment;
   if (alignment < sizeof(void *))
      alignment = sizeof(void *);

   buf->data = align_malloc(size, alignment);
   if (buf->data == NULL) {
      FREE(buf);
      return NULL;
   }

   return &buf->base;
}
/**
 * Allocate an on-demand buffer: system memory now, with a real buffer
 * instantiated later on demand.  Returns NULL on allocation failure.
 */
static struct pb_buffer *
pb_ondemand_manager_create_buffer(struct pb_manager *_mgr,
                                  size_t size,
                                  const struct pb_desc *desc)
{
   struct pb_ondemand_manager *mgr = pb_ondemand_manager(_mgr);
   struct pb_ondemand_buffer *buf;
   size_t alignment;

   buf = CALLOC_STRUCT(pb_ondemand_buffer);
   if (buf == NULL)
      return NULL;

   pipe_reference_init(&buf->base.base.reference, 1);
   buf->base.base.alignment = desc->alignment;
   buf->base.base.usage = desc->usage;
   buf->base.base.size = size;
   buf->base.vtbl = &pb_ondemand_buffer_vtbl;
   buf->mgr = mgr;

   /* align_malloc requires at least pointer-sized alignment */
   alignment = desc->alignment;
   if (alignment < sizeof(void *))
      alignment = sizeof(void *);

   buf->data = align_malloc(size, alignment);
   if (buf->data == NULL) {
      FREE(buf);
      return NULL;
   }

   /* remember request so the real buffer can be created later */
   buf->size = size;
   buf->desc = *desc;

   return &buf->base;
}
/**
 * Allocate staging storage for a buffer transfer.
 *
 * Small transfers (and only when push-data upload is available and permitted)
 * use plain malloc'ed memory; larger ones get a GART sub-allocation that is
 * mapped immediately.  The map pointer is advanced by @adj so it has the same
 * sub-alignment as the original box offset.
 *
 * Returns the map pointer, or NULL (tx->map unchanged) on failure.
 */
static uint8_t *
nouveau_transfer_staging(struct nouveau_context *nv,
                         struct nouveau_transfer *tx, boolean permit_pb)
{
   /* misalignment of the box start within the minimum map alignment */
   const unsigned adj = tx->base.box.x & NOUVEAU_MIN_BUFFER_MAP_ALIGN_MASK;
   const unsigned size = align(tx->base.box.width, 4) + adj;

   /* push-data upload path requires nv->push_data support */
   if (!nv->push_data)
      permit_pb = FALSE;

   if ((size <= NOUVEAU_TRANSFER_PUSHBUF_THRESHOLD) && permit_pb) {
      tx->map = align_malloc(size, NOUVEAU_MIN_BUFFER_MAP_ALIGN);
      if (tx->map)
         tx->map += adj;  /* restore original sub-alignment */
   } else {
      /* GART staging buffer; offset/map adjusted the same way */
      tx->mm = nouveau_mm_allocate(nv->screen->mm_GART, size, &tx->bo, &tx->offset);
      if (tx->bo) {
         tx->offset += adj;
         if (!nouveau_bo_map(tx->bo, 0, NULL))
            tx->map = (uint8_t *)tx->bo->map + tx->offset;
      }
   }
   return tx->map;
}
/**
 * Lazily allocate system-memory backing storage for a buffer resource.
 * Idempotent: keeps an existing allocation.  Returns TRUE on success.
 */
static INLINE boolean
nouveau_buffer_malloc(struct nv04_resource *buf)
{
   if (buf->data == NULL)
      buf->data = align_malloc(buf->base.width0,
                               NOUVEAU_MIN_BUFFER_MAP_ALIGN);
   return buf->data != NULL;
}
static struct pipe_resource * llvmpipe_resource_create(struct pipe_screen *_screen, const struct pipe_resource *templat) { struct llvmpipe_screen *screen = llvmpipe_screen(_screen); struct llvmpipe_resource *lpr = CALLOC_STRUCT(llvmpipe_resource); if (!lpr) return NULL; lpr->base = *templat; pipe_reference_init(&lpr->base.reference, 1); lpr->base.screen = &screen->base; /* assert(lpr->base.bind); */ if (resource_is_texture(&lpr->base)) { if (lpr->base.bind & PIPE_BIND_DISPLAY_TARGET) { /* displayable surface */ if (!llvmpipe_displaytarget_layout(screen, lpr)) goto fail; assert(lpr->layout[0][0] == LP_TEX_LAYOUT_NONE); } else { /* texture map */ if (!llvmpipe_texture_layout(screen, lpr)) goto fail; assert(lpr->layout[0][0] == LP_TEX_LAYOUT_NONE); } assert(lpr->layout[0]); } else { /* other data (vertex buffer, const buffer, etc) */ const enum pipe_format format = templat->format; const uint w = templat->width0 / util_format_get_blockheight(format); /* XXX buffers should only have one dimension, those values should be 1 */ const uint h = templat->height0 / util_format_get_blockwidth(format); const uint d = templat->depth0; const uint bpp = util_format_get_blocksize(format); const uint bytes = w * h * d * bpp; lpr->data = align_malloc(bytes, 16); if (!lpr->data) goto fail; memset(lpr->data, 0, bytes); } lpr->id = id_counter++; #ifdef DEBUG insert_at_tail(&resource_list, lpr); #endif return &lpr->base; fail: FREE(lpr); return NULL; }
/**
 * Conventional allocation path for non-display textures:
 * Use a simple, maximally packed layout.
 *
 * Computes per-level strides and offsets, rejecting images whose per-level
 * or total size exceeds SP_MAX_TEXTURE_SIZE (all size math is done in
 * 64 bits to avoid overflow).  When @allocate is TRUE the backing store is
 * also malloc'ed.  Returns FALSE on overflow or allocation failure.
 */
static boolean
softpipe_resource_layout(struct pipe_screen *screen,
                         struct softpipe_resource *spr,
                         boolean allocate)
{
   struct pipe_resource *pt = &spr->base;
   unsigned level;
   unsigned width = pt->width0;
   unsigned height = pt->height0;
   unsigned depth = pt->depth0;
   /* 64-bit accumulator: per-level sizes could overflow 32 bits */
   uint64_t buffer_size = 0;

   for (level = 0; level <= pt->last_level; level++) {
      unsigned slices, nblocksy;

      nblocksy = util_format_get_nblocksy(pt->format, height);

      if (pt->target == PIPE_TEXTURE_CUBE)
         assert(pt->array_size == 6);

      if (pt->target == PIPE_TEXTURE_3D)
         slices = depth;
      else
         slices = pt->array_size;

      spr->stride[level] = util_format_get_stride(pt->format, width);
      spr->level_offset[level] = buffer_size;

      /* if row_stride * height > SP_MAX_TEXTURE_SIZE */
      if ((uint64_t)spr->stride[level] * nblocksy > SP_MAX_TEXTURE_SIZE) {
         /* image too large */
         return FALSE;
      }

      spr->img_stride[level] = spr->stride[level] * nblocksy;

      buffer_size += (uint64_t) spr->img_stride[level] * slices;

      width = u_minify(width, 1);
      height = u_minify(height, 1);
      depth = u_minify(depth, 1);
   }

   /* total size across all levels must also fit */
   if (buffer_size > SP_MAX_TEXTURE_SIZE)
      return FALSE;

   if (allocate) {
      spr->data = align_malloc(buffer_size, 64);
      return spr->data != NULL;
   }
   else {
      return TRUE;
   }
}
/** * Create new lp_rasterizer. If num_threads is zero, don't create any * new threads, do rendering synchronously. * \param num_threads number of rasterizer threads to create */ struct lp_rasterizer * lp_rast_create( unsigned num_threads ) { struct lp_rasterizer *rast; unsigned i; rast = CALLOC_STRUCT(lp_rasterizer); if (!rast) { goto no_rast; } rast->full_scenes = lp_scene_queue_create(); if (!rast->full_scenes) { goto no_full_scenes; } for (i = 0; i < MAX2(1, num_threads); i++) { struct lp_rasterizer_task *task = &rast->tasks[i]; task->rast = rast; task->thread_index = i; task->thread_data.cache = align_malloc(sizeof(struct lp_build_format_cache), 16); if (!task->thread_data.cache) { goto no_thread_data_cache; } } rast->num_threads = num_threads; rast->no_rast = debug_get_bool_option("LP_NO_RAST", FALSE); create_rast_threads(rast); /* for synchronizing rasterization threads */ if (rast->num_threads > 0) { pipe_barrier_init( &rast->barrier, rast->num_threads ); } memset(lp_dummy_tile, 0, sizeof lp_dummy_tile); return rast; no_thread_data_cache: for (i = 0; i < MAX2(1, rast->num_threads); i++) { if (rast->tasks[i].thread_data.cache) { align_free(rast->tasks[i].thread_data.cache); } } lp_scene_queue_destroy(rast->full_scenes); no_full_scenes: FREE(rast); no_rast: return NULL; }
/**
 * Map a range of a buffer for CPU access.
 *
 * If the buffer has neither software nor hardware storage yet, hardware
 * storage is created; if that fails, a malloc'ed shadow buffer is used
 * instead (DMA will then be split into smaller pieces).  On success the
 * map count and write/flush-explicit flags are updated under the
 * screen's swc mutex.
 *
 * Returns the mapped pointer, or NULL on failure.
 */
static void *
svga_buffer_map_range( struct pipe_screen *screen,
                       struct pipe_buffer *buf,
                       unsigned offset, unsigned length,
                       unsigned usage )
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_buffer *sbuf = svga_buffer( buf );
   void *map;

   if (!sbuf->swbuf && !sbuf->hwbuf) {
      if (svga_buffer_create_hw_storage(ss, sbuf) != PIPE_OK) {
         /*
          * We can't create a hardware buffer big enough, so create a malloc
          * buffer instead.
          */
         debug_printf("%s: failed to allocate %u KB of DMA, splitting DMA transfers\n",
                      __FUNCTION__,
                      (sbuf->base.size + 1023)/1024);
         /* NOTE: may still be NULL on OOM; handled by the map==NULL path below */
         sbuf->swbuf = align_malloc(sbuf->base.size, sbuf->base.alignment);
      }
   }

   if (sbuf->swbuf) {
      /* User/malloc buffer */
      map = sbuf->swbuf;
   }
   else if (sbuf->hwbuf) {
      map = sws->buffer_map(sws, sbuf->hwbuf, usage);
   }
   else {
      map = NULL;
   }

   if(map) {
      /* bookkeeping is protected by the screen-wide swc mutex */
      pipe_mutex_lock(ss->swc_mutex);

      ++sbuf->map.count;

      if (usage & PIPE_BUFFER_USAGE_CPU_WRITE) {
         /* at most one concurrent writer expected */
         assert(sbuf->map.count <= 1);
         sbuf->map.writing = TRUE;
         if (usage & PIPE_BUFFER_USAGE_FLUSH_EXPLICIT)
            sbuf->map.flush_explicit = TRUE;
      }

      pipe_mutex_unlock(ss->swc_mutex);
   }

   return map;
}
/**
 * Small smoke test for align_malloc/align_free: allocate 100 bytes with
 * 16-byte alignment, touch the first and last byte, print them, free.
 * Returns 0 on success, 1 if the allocation fails.
 */
int main(int argc, char** argv)
{
   char *data = (char *)align_malloc(100, 16);
   /* BUGFIX: the original dereferenced the result without checking it */
   if (!data) {
      std::cout << "align_malloc failed" << std::endl;
      return 1;
   }
   print_bits(data);
   data[0] = 'a';
   data[99] = 'b';
   std::cout << data[0] << " " << data[99] << std::endl;
   align_free(data);
   return 0;
}
static HRESULT NineVolume9_AllocateData( struct NineVolume9 *This ) { unsigned size = This->layer_stride * This->desc.Depth; DBG("(%p(This=%p),level=%u) Allocating 0x%x bytes of system memory.\n", This->base.container, This, This->level, size); This->data = (uint8_t *)align_malloc(size, 32); if (!This->data) return E_OUTOFMEMORY; return D3D_OK; }
static struct llvmpipe_displaytarget * xm_displaytarget_create(struct llvmpipe_winsys *winsys, enum pipe_format format, unsigned width, unsigned height, unsigned alignment, unsigned *stride) { struct xm_displaytarget *xm_dt = CALLOC_STRUCT(xm_displaytarget); unsigned nblocksx, nblocksy, size; xm_dt = CALLOC_STRUCT(xm_displaytarget); if(!xm_dt) goto no_xm_dt; xm_dt->format = format; xm_dt->width = width; xm_dt->height = height; pf_get_block(format, &xm_dt->block); nblocksx = pf_get_nblocksx(&xm_dt->block, width); nblocksy = pf_get_nblocksy(&xm_dt->block, height); xm_dt->stride = align(nblocksx * xm_dt->block.size, alignment); size = xm_dt->stride * nblocksy; #ifdef USE_XSHM if (!debug_get_bool_option("XLIB_NO_SHM", FALSE)) { xm_dt->shminfo.shmid = -1; xm_dt->shminfo.shmaddr = (char *) -1; xm_dt->shm = TRUE; xm_dt->data = alloc_shm(xm_dt, size); if(!xm_dt->data) goto no_data; } #endif if(!xm_dt->data) { xm_dt->data = align_malloc(size, alignment); if(!xm_dt->data) goto no_data; } *stride = xm_dt->stride; return (struct llvmpipe_displaytarget *)xm_dt; no_data: FREE(xm_dt); no_xm_dt: return NULL; }
/**
 * Allocate and initialize an AOS vertex-shader machine: 16-byte aligned
 * (required for SSE), zeroed, with the internal constant table and the
 * x87 FPU control words populated.  Returns NULL on allocation failure.
 */
struct aos_machine *draw_vs_aos_machine( void )
{
   struct aos_machine *machine;
   unsigned i;
   float inv = 1.0f/255.0f;
   float f255 = 255.0f;

   machine = align_malloc(sizeof(struct aos_machine), 16);
   if (!machine)
      return NULL;

   memset(machine, 0, sizeof(*machine));

   /* Constants used by generated code, as 4-float vectors: */
   ASSIGN_4V(machine->internal[IMM_SWZ],       1.0f, -1.0f, 0.0f, 1.0f);
   /* NOTE: type-punned write; stores an all-ones bit pattern into the
    * last lane of IMM_SWZ (used as a mask, not as a float). */
   *(unsigned *)&machine->internal[IMM_SWZ][3] = 0xffffffff;
   ASSIGN_4V(machine->internal[IMM_ONES],      1.0f,  1.0f, 1.0f, 1.0f);
   ASSIGN_4V(machine->internal[IMM_NEGS],     -1.0f, -1.0f, -1.0f, -1.0f);
   ASSIGN_4V(machine->internal[IMM_IDENTITY],  0.0f,  0.0f, 0.0f, 1.0f);
   ASSIGN_4V(machine->internal[IMM_INV_255],   inv,   inv,  inv,  inv);
   ASSIGN_4V(machine->internal[IMM_255],       f255,  f255, f255, f255);
   ASSIGN_4V(machine->internal[IMM_RSQ],       -.5f,  1.5f, 0.0f, 0.0f);

   /* x87 control word: all exceptions masked, round-to-nearest,
    * double-extended precision.  Bit 6 is a reserved always-set bit. */
   machine->fpu_rnd_nearest = (X87_CW_EXCEPTION_INV_OP |
                               X87_CW_EXCEPTION_DENORM_OP |
                               X87_CW_EXCEPTION_ZERO_DIVIDE |
                               X87_CW_EXCEPTION_OVERFLOW |
                               X87_CW_EXCEPTION_UNDERFLOW |
                               X87_CW_EXCEPTION_PRECISION |
                               (1<<6) |
                               X87_CW_ROUND_NEAREST |
                               X87_CW_PRECISION_DOUBLE_EXT);

   /* sanity-check the expected raw control-word value */
   assert(machine->fpu_rnd_nearest == 0x37f);

   /* same as above but rounding toward negative infinity (for floor) */
   machine->fpu_rnd_neg_inf = (X87_CW_EXCEPTION_INV_OP |
                               X87_CW_EXCEPTION_DENORM_OP |
                               X87_CW_EXCEPTION_ZERO_DIVIDE |
                               X87_CW_EXCEPTION_OVERFLOW |
                               X87_CW_EXCEPTION_UNDERFLOW |
                               X87_CW_EXCEPTION_PRECISION |
                               (1<<6) |
                               X87_CW_ROUND_DOWN |
                               X87_CW_PRECISION_DOUBLE_EXT);

   /* pre-populate all specular-exponent lookup tables */
   for (i = 0; i < MAX_SHINE_TAB; i++)
      do_populate_lut( &machine->shine_tab[i], 1.0f );

   return machine;
}
/**
 * Create a GDI software display target: malloc'ed pixel storage plus a
 * BITMAPINFO header describing it, ready for SetDIBitsToDevice-style blits.
 * Returns NULL on allocation failure.
 */
static struct sw_displaytarget *
gdi_sw_displaytarget_create(struct sw_winsys *winsys,
                            unsigned tex_usage,
                            enum pipe_format format,
                            unsigned width, unsigned height,
                            unsigned alignment,
                            const void *front_private,
                            unsigned *stride)
{
   struct gdi_sw_displaytarget *gdt;
   unsigned cpp;   /* bytes per pixel */
   unsigned bpp;   /* bits per pixel */

   gdt = CALLOC_STRUCT(gdi_sw_displaytarget);
   if(!gdt)
      goto no_gdt;

   gdt->format = format;
   gdt->width = width;
   gdt->height = height;

   bpp = util_format_get_blocksizebits(format);
   cpp = util_format_get_blocksize(format);

   gdt->stride = align(width * cpp, alignment);
   gdt->size = gdt->stride * height;

   gdt->data = align_malloc(gdt->size, alignment);
   if(!gdt->data)
      goto no_data;

   gdt->bmi.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
   /* express the padded stride as a pixel width */
   gdt->bmi.bmiHeader.biWidth = gdt->stride / cpp;
   /* negative height = top-down DIB */
   gdt->bmi.bmiHeader.biHeight= -(long)height;
   gdt->bmi.bmiHeader.biPlanes = 1;
   gdt->bmi.bmiHeader.biBitCount = bpp;
   gdt->bmi.bmiHeader.biCompression = BI_RGB;
   gdt->bmi.bmiHeader.biSizeImage = 0;
   gdt->bmi.bmiHeader.biXPelsPerMeter = 0;
   gdt->bmi.bmiHeader.biYPelsPerMeter = 0;
   gdt->bmi.bmiHeader.biClrUsed = 0;
   gdt->bmi.bmiHeader.biClrImportant = 0;

   *stride = gdt->stride;
   return (struct sw_displaytarget *)gdt;

no_data:
   FREE(gdt);
no_gdt:
   return NULL;
}
static struct pipe_buffer* xsp_buffer_create(struct pipe_winsys *pws, unsigned alignment, unsigned usage, unsigned size) { struct xsp_buffer *buffer; assert(pws); buffer = calloc(1, sizeof(struct xsp_buffer)); pipe_reference_init(&buffer->base.reference, 1); buffer->base.alignment = alignment; buffer->base.usage = usage; buffer->base.size = size; buffer->data = align_malloc(size, alignment); return (struct pipe_buffer*)buffer; }
boolean draw_gs_init( struct draw_context *draw ) { draw->gs.tgsi.machine = tgsi_exec_machine_create(); if (!draw->gs.tgsi.machine) return FALSE; draw->gs.tgsi.machine->Primitives = align_malloc( MAX_PRIMITIVES * sizeof(struct tgsi_exec_vector), 16); if (!draw->gs.tgsi.machine->Primitives) return FALSE; memset(draw->gs.tgsi.machine->Primitives, 0, MAX_PRIMITIVES * sizeof(struct tgsi_exec_vector)); return TRUE; }
static struct pipe_buffer * st_softpipe_buffer_create(struct pipe_winsys *winsys, unsigned alignment, unsigned usage, unsigned size) { struct st_softpipe_buffer *buffer = CALLOC_STRUCT(st_softpipe_buffer); pipe_reference_init(&buffer->base.reference, 1); buffer->base.alignment = alignment; buffer->base.usage = usage; buffer->base.size = size; buffer->data = align_malloc(size, alignment); return &buffer->base; }
/**
 * Create a vtest winsys resource.  Scanout/display resources get a software
 * display target; everything else gets malloc'ed storage.  The resource
 * creation is then forwarded to the vtest server and a process-local handle
 * is assigned.  Returns NULL on allocation failure.
 */
static struct virgl_hw_res *
virgl_vtest_winsys_resource_create(struct virgl_winsys *vws,
                                   enum pipe_texture_target target,
                                   uint32_t format,
                                   uint32_t bind,
                                   uint32_t width,
                                   uint32_t height,
                                   uint32_t depth,
                                   uint32_t array_size,
                                   uint32_t last_level,
                                   uint32_t nr_samples,
                                   uint32_t size)
{
   struct virgl_vtest_winsys *vtws = virgl_vtest_winsys(vws);
   struct virgl_hw_res *res;
   static int handle = 1;

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      return NULL;

   if (bind & (VIRGL_BIND_DISPLAY_TARGET | VIRGL_BIND_SCANOUT)) {
      res->dt = vtws->sws->displaytarget_create(vtws->sws, bind, format,
                                                width, height, 64, NULL,
                                                &res->stride);
      /* BUGFIX: displaytarget_create failure was previously ignored */
      if (!res->dt) {
         FREE(res);
         return NULL;
      }
   } else {
      res->ptr = align_malloc(size, 64);
      if (!res->ptr) {
         FREE(res);
         return NULL;
      }
   }

   res->bind = bind;
   res->format = format;
   res->height = height;
   res->width = width;

   virgl_vtest_send_resource_create(vtws, handle, target, format, bind,
                                    width, height, depth, array_size,
                                    last_level, nr_samples);

   res->res_handle = handle++;
   pipe_reference_init(&res->reference, 1);
   return res;
}
static struct pipe_buffer * svga_buffer_create(struct pipe_screen *screen, unsigned alignment, unsigned usage, unsigned size) { struct svga_screen *ss = svga_screen(screen); struct svga_buffer *sbuf; assert(size); assert(alignment); sbuf = CALLOC_STRUCT(svga_buffer); if(!sbuf) goto error1; sbuf->magic = SVGA_BUFFER_MAGIC; pipe_reference_init(&sbuf->base.reference, 1); sbuf->base.screen = screen; sbuf->base.alignment = alignment; sbuf->base.usage = usage; sbuf->base.size = size; if(svga_buffer_needs_hw_storage(usage)) { if(svga_buffer_create_host_surface(ss, sbuf) != PIPE_OK) goto error2; } else { if(alignment < sizeof(void*)) alignment = sizeof(void*); usage |= PIPE_BUFFER_USAGE_CPU_READ_WRITE; sbuf->swbuf = align_malloc(size, alignment); if(!sbuf->swbuf) goto error2; } return &sbuf->base; error2: FREE(sbuf); error1: return NULL; }
static struct sw_displaytarget * xlib_displaytarget_create(struct sw_winsys *winsys, unsigned tex_usage, enum pipe_format format, unsigned width, unsigned height, unsigned alignment, unsigned *stride) { struct xlib_displaytarget *xlib_dt; unsigned nblocksy, size; xlib_dt = CALLOC_STRUCT(xlib_displaytarget); if (!xlib_dt) goto no_xlib_dt; xlib_dt->display = ((struct xlib_sw_winsys *)winsys)->display; xlib_dt->format = format; xlib_dt->width = width; xlib_dt->height = height; nblocksy = util_format_get_nblocksy(format, height); xlib_dt->stride = align(util_format_get_stride(format, width), alignment); size = xlib_dt->stride * nblocksy; if (!debug_get_option_xlib_no_shm()) { xlib_dt->data = alloc_shm(xlib_dt, size); if (xlib_dt->data) { xlib_dt->shm = True; } } if (!xlib_dt->data) { xlib_dt->data = align_malloc(size, alignment); if (!xlib_dt->data) goto no_data; } *stride = xlib_dt->stride; return (struct sw_displaytarget *)xlib_dt; no_data: FREE(xlib_dt); no_xlib_dt: return NULL; }
/**
 * Grow the setup vertex buffer if needed and record the vertex layout.
 * Returns FALSE if the (re)allocation failed.
 */
static boolean
lp_setup_allocate_vertices(struct vbuf_render *vbr,
                           ushort vertex_size, ushort nr_vertices)
{
   struct lp_setup_context *setup = lp_setup_context(vbr);
   unsigned size = vertex_size * nr_vertices;

   if (setup->vertex_buffer_size < size) {
      align_free(setup->vertex_buffer);
      setup->vertex_buffer = align_malloc(size, 16);
      /* BUGFIX: record zero capacity on failure.  Unconditionally storing
       * `size` made a later, smaller request believe the buffer was big
       * enough and dereference the NULL pointer. */
      setup->vertex_buffer_size = setup->vertex_buffer ? size : 0;
   }

   setup->vertex_size = vertex_size;
   setup->nr_vertices = nr_vertices;

   return setup->vertex_buffer != NULL;
}
/**
 * Grow the softpipe vbuf vertex buffer if needed and record the layout.
 * Returns FALSE if the (re)allocation failed.
 */
static boolean
sp_vbuf_allocate_vertices(struct vbuf_render *vbr,
                          ushort vertex_size, ushort nr_vertices)
{
   struct softpipe_vbuf_render *cvbr = softpipe_vbuf_render(vbr);
   unsigned size = vertex_size * nr_vertices;

   if (cvbr->vertex_buffer_size < size) {
      align_free(cvbr->vertex_buffer);
      cvbr->vertex_buffer = align_malloc(size, 16);
      /* BUGFIX: record zero capacity on failure.  Unconditionally storing
       * `size` made a later, smaller request believe the buffer was big
       * enough and dereference the NULL pointer. */
      cvbr->vertex_buffer_size = cvbr->vertex_buffer ? size : 0;
   }

   cvbr->vertex_size = vertex_size;
   cvbr->nr_vertices = nr_vertices;

   return cvbr->vertex_buffer != NULL;
}
/** * Create CPU storage for this buffer. */ static enum pipe_error fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr, struct fenced_buffer *fenced_buf) { assert(!fenced_buf->data); if(fenced_buf->data) return PIPE_OK; if (fenced_mgr->cpu_total_size + fenced_buf->size > fenced_mgr->max_cpu_total_size) return PIPE_ERROR_OUT_OF_MEMORY; fenced_buf->data = align_malloc(fenced_buf->size, fenced_buf->desc.alignment); if(!fenced_buf->data) return PIPE_ERROR_OUT_OF_MEMORY; fenced_mgr->cpu_total_size += fenced_buf->size; return PIPE_OK; }
/**
 * Create a new primitive vbuf/render stage.
 *
 * Wires up the stage callbacks, allocates the index buffer (16-byte
 * aligned) and a translate cache.  On any failure the partially built
 * stage is torn down via vbuf_destroy and NULL is returned.
 */
struct draw_stage *draw_vbuf_stage( struct draw_context *draw,
                                    struct vbuf_render *render )
{
   struct vbuf_stage *vbuf = CALLOC_STRUCT(vbuf_stage);
   if (!vbuf)
      goto fail;

   vbuf->stage.draw = draw;
   vbuf->stage.name = "vbuf";
   vbuf->stage.point = vbuf_first_point;
   vbuf->stage.line = vbuf_first_line;
   vbuf->stage.tri = vbuf_first_tri;
   vbuf->stage.flush = vbuf_flush;
   vbuf->stage.reset_stipple_counter = vbuf_reset_stipple_counter;
   vbuf->stage.destroy = vbuf_destroy;

   vbuf->render = render;
   /* keep one index in reserve for the UNDEFINED_VERTEX_ID sentinel */
   vbuf->max_indices = MIN2(render->max_indices, UNDEFINED_VERTEX_ID-1);

   vbuf->indices = (ushort *) align_malloc(vbuf->max_indices *
                                           sizeof(vbuf->indices[0]),
                                           16);
   if (!vbuf->indices)
      goto fail;

   vbuf->cache = translate_cache_create();
   if (!vbuf->cache)
      goto fail;

   /* vertex storage is allocated later, per batch */
   vbuf->vertices = NULL;
   vbuf->vertex_ptr = vbuf->vertices;

   vbuf->zero4[0] = vbuf->zero4[1] = vbuf->zero4[2] = vbuf->zero4[3] = 0.0f;

   return &vbuf->stage;

fail:
   /* vbuf_destroy copes with partially-initialized stages */
   if (vbuf)
      vbuf_destroy(&vbuf->stage);

   return NULL;
}
/**
 * Specify the surface to cache.
 *
 * Tears down any existing transfer (unmapping it first if mapped), then, if
 * @ps is non-NULL, creates a read/write transfer covering the whole surface
 * and marks every covered tile undefined, lazily allocating per-tile color
 * storage.  Passing the surface already cached (with a live transfer) is a
 * no-op.
 */
void
lp_tile_cache_set_surface(struct llvmpipe_tile_cache *tc,
                          struct pipe_surface *ps)
{
   if (tc->transfer) {
      struct pipe_screen *screen = tc->transfer->texture->screen;

      /* same surface, nothing to do */
      if (ps == tc->surface)
         return;

      if (tc->transfer_map) {
         screen->transfer_unmap(screen, tc->transfer);
         tc->transfer_map = NULL;
      }

      screen->tex_transfer_destroy(tc->transfer);
      tc->transfer = NULL;
   }

   tc->surface = ps;

   if (ps) {
      struct pipe_screen *screen = ps->texture->screen;
      unsigned x, y;

      tc->transfer = screen->get_tex_transfer(screen, ps->texture, ps->face,
                                              ps->level, ps->zslice,
                                              PIPE_TRANSFER_READ_WRITE,
                                              0, 0, ps->width, ps->height);

      /* invalidate all tiles covering the surface */
      for (y = 0; y < ps->height; y += TILE_SIZE) {
         for (x = 0; x < ps->width; x += TILE_SIZE) {
            struct llvmpipe_cached_tile *tile = &tc->entries[y/TILE_SIZE][x/TILE_SIZE];
            tile->status = LP_TILE_STATUS_UNDEFINED;
            /* NOTE(review): allocation failure here is not detected;
             * tile->color would stay NULL — confirm callers tolerate it */
            if(!tile->color)
               tile->color = align_malloc( TILE_SIZE*TILE_SIZE*NUM_CHANNELS, 16 );
         }
      }
   }
}
/**
 * Create a winsys buffer backed by malloc'ed memory.
 * Returns NULL if either allocation fails (previously both allocations
 * went unchecked and a buffer with NULL data could be returned).
 */
static struct pipe_buffer *
xm_buffer_create(struct pipe_winsys *pws,
                 unsigned alignment,
                 unsigned usage,
                 unsigned size)
{
   struct xm_buffer *buffer = CALLOC_STRUCT(xm_buffer);
   if (!buffer)
      return NULL;

   pipe_reference_init(&buffer->base.reference, 1);
   buffer->base.alignment = alignment;
   buffer->base.usage = usage;
   buffer->base.size = size;

   if (buffer->data == NULL) {
      /* align to 16-byte multiple for Cell */
      buffer->data = align_malloc(size, max(alignment, 16));
      if (!buffer->data) {
         FREE(buffer);
         return NULL;
      }
   }

   return &buffer->base;
}
static struct sw_displaytarget* hook_winsys_displaytarget_create(struct sw_winsys* winsys, unsigned textureUsage, enum pipe_format format, unsigned width, unsigned height, unsigned alignment, unsigned* stride) { CALLED(); struct haiku_displaytarget* haikuDisplayTarget = CALLOC_STRUCT(haiku_displaytarget); if (!haikuDisplayTarget) { ERROR("%s: Couldn't allocate Haiku display target!\n", __func__); return NULL; } haikuDisplayTarget->format = format; haikuDisplayTarget->width = width; haikuDisplayTarget->height = height; //unsigned bitsPerPixel = util_format_get_blocksizebits(format); unsigned colorsPerPalette = util_format_get_blocksize(format); haikuDisplayTarget->stride = align(width * colorsPerPalette, alignment); haikuDisplayTarget->size = haikuDisplayTarget->stride * height; haikuDisplayTarget->data = align_malloc(haikuDisplayTarget->size, alignment); if (!haikuDisplayTarget->data) { ERROR("%s: Couldn't allocate Haiku display target data!\n", __func__); FREE(haikuDisplayTarget); return NULL; } *stride = haikuDisplayTarget->stride; // Cast to ghost sw_displaytarget type return (struct sw_displaytarget*)haikuDisplayTarget; }
/** * Create a new primitive setup/render stage. */ struct setup_context *llvmpipe_setup_create_context( struct llvmpipe_context *llvmpipe ) { struct setup_context *setup; unsigned i; setup = align_malloc(sizeof(struct setup_context), 16); if (!setup) return NULL; memset(setup, 0, sizeof *setup); setup->llvmpipe = llvmpipe; for (i = 0; i < MAX_QUADS; i++) { setup->quad[i].coef = &setup->coef; } setup->span.left[0] = 1000000; /* greater than right[0] */ setup->span.left[1] = 1000000; /* greater than right[1] */ return setup; }
struct pipe_resource *r300_buffer_create(struct pipe_screen *screen, const struct pipe_resource *templ) { struct r300_screen *r300screen = r300_screen(screen); struct r300_resource *rbuf; rbuf = MALLOC_STRUCT(r300_resource); rbuf->b.b = *templ; rbuf->b.vtbl = &r300_buffer_vtbl; pipe_reference_init(&rbuf->b.b.reference, 1); rbuf->b.b.screen = screen; rbuf->domain = RADEON_DOMAIN_GTT; rbuf->buf = NULL; rbuf->malloced_buffer = NULL; /* Allocate constant buffers and SWTCL vertex and index buffers in RAM. * Note that uploaded index buffers use the flag PIPE_BIND_CUSTOM, so that * we can distinguish them from user-created buffers. */ if (templ->bind & PIPE_BIND_CONSTANT_BUFFER || (!r300screen->caps.has_tcl && !(templ->bind & PIPE_BIND_CUSTOM))) { rbuf->malloced_buffer = align_malloc(templ->width0, 64); return &rbuf->b.b; } rbuf->buf = r300screen->rws->buffer_create(r300screen->rws, rbuf->b.b.width0, R300_BUFFER_ALIGNMENT, TRUE, rbuf->domain, 0); if (!rbuf->buf) { FREE(rbuf); return NULL; } rbuf->cs_buf = r300screen->rws->buffer_get_cs_handle(rbuf->buf); return &rbuf->b.b; }
/** * Conventional allocation path for non-display textures: * Use a simple, maximally packed layout. */ static boolean softpipe_resource_layout(struct pipe_screen *screen, struct softpipe_resource *spr) { struct pipe_resource *pt = &spr->base; unsigned level; unsigned width = pt->width0; unsigned height = pt->height0; unsigned depth = pt->depth0; unsigned buffer_size = 0; for (level = 0; level <= pt->last_level; level++) { unsigned slices; if (pt->target == PIPE_TEXTURE_CUBE) slices = 6; else if (pt->target == PIPE_TEXTURE_3D) slices = depth; else slices = pt->array_size; spr->stride[level] = util_format_get_stride(pt->format, width); spr->level_offset[level] = buffer_size; buffer_size += (util_format_get_nblocksy(pt->format, height) * slices * spr->stride[level]); width = u_minify(width, 1); height = u_minify(height, 1); depth = u_minify(depth, 1); } spr->data = align_malloc(buffer_size, 16); return spr->data != NULL; }
static struct sw_displaytarget * dri_sw_displaytarget_create(struct sw_winsys *winsys, unsigned tex_usage, enum pipe_format format, unsigned width, unsigned height, unsigned alignment, unsigned *stride) { struct dri_sw_displaytarget *dri_sw_dt; unsigned nblocksy, size, format_stride; dri_sw_dt = CALLOC_STRUCT(dri_sw_displaytarget); if(!dri_sw_dt) goto no_dt; dri_sw_dt->format = format; dri_sw_dt->width = width; dri_sw_dt->height = height; format_stride = util_format_get_stride(format, width); dri_sw_dt->stride = align(format_stride, alignment); nblocksy = util_format_get_nblocksy(format, height); size = dri_sw_dt->stride * nblocksy; dri_sw_dt->data = align_malloc(size, alignment); if(!dri_sw_dt->data) goto no_data; *stride = dri_sw_dt->stride; return (struct sw_displaytarget *)dri_sw_dt; no_data: FREE(dri_sw_dt); no_dt: return NULL; }