/* Construct an empty string buffer: 512 bytes of capacity allocated from
 * the given ralloc context, holding the empty string. */
string_buffer(void* mem_ctx)
{
   m_Capacity = 512;
   m_Size = 0;
   m_Ptr = (char*)ralloc_size(mem_ctx, m_Capacity);
   m_Ptr[0] = 0;
}
/* Reset a command list to empty.  The backing store starts out as a
 * single ralloc'd byte owned by the context and is grown as needed. */
void
vc4_init_cl(struct vc4_context *vc4, struct vc4_cl *cl)
{
   cl->base = ralloc_size(vc4, 1);
   cl->next = cl->base;   /* write cursor starts at the beginning */
   cl->size = 0;
}
/* Build an array type of 'length' elements of type 'array' (length 0 means
 * an unsized array).  The printable name is the element name with
 * "[length]" (or "[]") appended. */
glsl_type::glsl_type(const glsl_type *array, unsigned length) :
   base_type(GLSL_TYPE_ARRAY),
   sampler_dimensionality(0), sampler_shadow(0), sampler_array(0),
   sampler_type(0), vector_elements(0), matrix_columns(0),
   name(NULL), length(length)
{
   this->fields.array = array;

   /* The GL type is used for uniform/statevar handling in Mesa; arrayness
    * is carried by the length rather than the type, so just inherit the
    * element's GL type. */
   this->gl_type = array->gl_type;

   /* Worst case name: element name, up to 10 digits (32 bits of ~0),
    * the two brackets, and the terminating NUL. */
   const unsigned buf_len = (unsigned)strlen(array->name) + 10 + 3;
   char *const buf = (char *) ralloc_size(this->mem_ctx, buf_len);

   if (length != 0)
      snprintf(buf, buf_len, "%s[%u]", array->name, length);
   else
      snprintf(buf, buf_len, "%s[]", array->name);

   this->name = buf;
}
/* Copy the serialized program in 'blob' into prog->driver_cache_blob.
 * The copy is allocated with a NULL ralloc parent, so its lifetime is
 * managed explicitly by whoever owns 'prog'.
 *
 * Fix: the original memcpy'd into the allocation without checking it,
 * so an OOM would crash.  On allocation failure we now leave the program
 * with no cached blob (blob pointer NULL, size 0) instead. */
static void
copy_blob_to_driver_cache_blob(struct blob *blob, struct gl_program *prog)
{
   prog->driver_cache_blob = ralloc_size(NULL, blob->size);
   if (prog->driver_cache_blob == NULL) {
      prog->driver_cache_blob_size = 0;
      return;
   }
   memcpy(prog->driver_cache_blob, blob->data, blob->size);
   prog->driver_cache_blob_size = blob->size;
}
/* ralloc-based placement new.  Callers never call delete; freeing 'ctx'
 * (or any of its ancestors) with ralloc_free releases the memory. */
static void* operator new(size_t size, void *ctx)
{
   void *mem = ralloc_size(ctx, size);
   check(mem != NULL);
   return mem;
}
/* Bundle a shader cache key and its program-cache id into a single
 * ralloc'd keybox (header plus trailing key bytes), suitable for use as
 * a hash table key. */
static struct keybox *
make_keybox(void *mem_ctx, enum iris_program_cache_id cache_id,
            const void *key, uint32_t key_size)
{
   struct keybox *box =
      ralloc_size(mem_ctx, sizeof(struct keybox) + key_size);

   box->cache_id = cache_id;
   box->size = key_size;
   memcpy(box->data, key, key_size);

   return box;
}
/* Return a copy of 'old' with room for 'new_ids' additional links.
 * The existing links are copied over; the new trailing slots are left
 * for the caller to fill in. */
static struct vtn_access_chain *
vtn_access_chain_extend(struct vtn_builder *b, struct vtn_access_chain *old,
                        unsigned new_ids)
{
   const unsigned total_len = old->length + new_ids;
   struct vtn_access_chain *copy =
      ralloc_size(b, sizeof(*copy) + total_len * sizeof(copy->link[0]));

   copy->var = old->var;
   copy->length = total_len;

   unsigned idx = 0;
   while (idx < old->length) {
      copy->link[idx] = old->link[idx];
      idx++;
   }

   return copy;
}
/* Test that we can read and write some large objects, exercising the
 * code in the blob_write functions that reallocs blob->data. */
static void
test_big_objects(void)
{
   void *ctx = ralloc_context(NULL);
   struct blob blob;
   struct blob_reader reader;
   const int size = 1000;
   const int count = 1000;
   char *buf;
   size_t i;

   blob_init(&blob);

   /* Fill a pattern buffer to write repeatedly. */
   buf = ralloc_size(ctx, size);
   for (i = 0; i < size; i++)
      buf[i] = i % 256;

   /* Append it to the blob many times (forcing internal reallocs). */
   for (i = 0; i < count; i++)
      blob_write_bytes(&blob, buf, size);

   blob_reader_init(&reader, blob.data, blob.size);

   /* Read everything back and verify each copy. */
   for (i = 0; i < count; i++) {
      expect_equal_bytes((uint8_t *) buf, blob_read_bytes(&reader, size),
                         size, "read of large objects");
   }

   /* The reader should have consumed exactly everything, cleanly. */
   expect_equal(reader.end - reader.data, reader.current - reader.data,
                "number of bytes read reading large objects");
   expect_equal(false, reader.overrun,
                "overrun flag not set reading large objects");

   blob_finish(&blob);
   ralloc_free(ctx);
}
/* Record one piece of state data emitted into the batchbuffer, for
 * later debug/annotation dumping.  Appends an (offset, size, type)
 * entry to brw->state_batch_list, allocating the list on first use.
 *
 * NOTE(review): no bounds check against the computed capacity below —
 * presumably callers can never emit more than bo->size/32 entries
 * because each struct occupies >= 32 bytes; confirm against callers. */
static void brw_track_state_batch(struct brw_context *brw,
                                  enum state_struct_type type,
                                  uint32_t offset,
                                  int size)
{
   struct intel_batchbuffer *batch = &brw->intel.batch;

   if (!brw->state_batch_list) {
      /* Our structs are always aligned to at least 32 bytes, so
       * our array doesn't need to be any larger
       */
      brw->state_batch_list = ralloc_size(brw, sizeof(*brw->state_batch_list) *
                                          batch->bo->size / 32);
   }

   /* Append the new entry and bump the count. */
   brw->state_batch_list[brw->state_batch_count].offset = offset;
   brw->state_batch_list[brw->state_batch_count].size = size;
   brw->state_batch_list[brw->state_batch_count].type = type;
   brw->state_batch_count++;
}
/* Construct an array type of 'length' elements of element type 'array'
 * (length 0 means an unsized array).  Builds a printable name with the
 * new dimension inserted in the correct position for multi-dimensional
 * arrays, e.g. extending "vec4[2]" by 3 yields "vec4[3][2]". */
glsl_type::glsl_type(const glsl_type *array, unsigned length) :
   base_type(GLSL_TYPE_ARRAY), sampler_dimensionality(0), sampler_shadow(0),
   sampler_array(0), sampler_type(0), interface_packing(0),
   vector_elements(0), matrix_columns(0), name(NULL), length(length)
{
   this->fields.array = array;
   /* Inherit the gl type of the base. The GL type is used for
    * uniform/statevar handling in Mesa and the arrayness of the type
    * is represented by the size rather than the type.
    */
   this->gl_type = array->gl_type;

   /* Allow a maximum of 10 characters for the array size. This is enough
    * for 32-bits of ~0. The extra 3 are for the '[', ']', and terminating
    * NUL.
    */
   const unsigned name_length = (unsigned)strlen(array->name) + 10 + 3;
   char *const n = (char *) ralloc_size(this->mem_ctx, name_length);

   if (length == 0)
      snprintf(n, name_length, "%s[]", array->name);
   else {
      /* insert outermost dimensions in the correct spot
       * otherwise the dimension order will be backwards
       */
      const char *pos = strchr(array->name, '[');
      if (pos) {
         /* idx = length of the element name up to (not including) the
          * first '[' of its existing dimension list. */
         int idx = pos - array->name;
         /* Copy just that prefix: snprintf writes at most idx characters
          * here plus the NUL it always appends at n[idx]. */
         snprintf(n, idx+1, "%s", array->name);
         /* Then append the new (outermost) dimension followed by the
          * element's existing dimension list, overwriting that NUL. */
         snprintf(n + idx, name_length - idx, "[%u]%s",
                  length, array->name + idx);
      } else {
         /* Element has no dimensions yet: simple append. */
         snprintf(n, name_length, "%s[%u]", array->name, length);
      }
   }

   this->name = n;
}
void vc4_simulator_init(struct vc4_screen *screen) { screen->simulator_mem_size = 256 * 1024 * 1024; screen->simulator_mem_base = ralloc_size(screen, screen->simulator_mem_size); /* We supply our own memory so that we can have more aperture * available (256MB instead of simpenrose's default 64MB). */ simpenrose_init_hardware_supply_mem(screen->simulator_mem_base, screen->simulator_mem_size); /* Carve out low memory for tile allocation overflow. The kernel * should be automatically handling overflow memory setup on real * hardware, but for simulation we just get one shot to set up enough * overflow memory before execution. This overflow mem will be used * up over the whole lifetime of simpenrose (not reused on each * flush), so it had better be big. */ simpenrose_supply_overflow_mem(0, OVERFLOW_SIZE); }
/* Construct an array type of 'length' elements of type 'array' (length 0
 * means an unsized array).  The printable name is the element name with
 * "[length]" (or "[]") appended. */
glsl_type::glsl_type(const glsl_type *array, unsigned length) :
   base_type(GLSL_TYPE_ARRAY),
   sampler_dimensionality(0), sampler_shadow(0), sampler_array(0),
   inner_type(0), vector_elements(0), matrix_columns(0),
   name(NULL), length(length), patch_length(0)
{
   this->fields.array = array;

   /* Worst case name: element name, up to 10 digits (32 bits of ~0),
    * the two brackets, and the terminating NUL. */
   const size_t buf_len = strlen(array->name) + 10 + 3;
   char *const buf = (char *)ralloc_size(this->mem_ctx, buf_len);

   if (length != 0)
      snprintf(buf, buf_len, "%s[%u]", array->name, length);
   else
      snprintf(buf, buf_len, "%s[]", array->name);

   this->name = buf;
}
/* ralloc-based placement new.  No matching delete is required; just
 * ralloc_free 'ctx' (or any of its ancestors) to release the memory. */
static void* operator new(size_t size, void *ctx)
{
   void *p = ralloc_size(ctx, size);
   assert(p != NULL);
   return p;
}
/*
 * Build a SolarSystem from an Allegro config file.
 *
 * Every non-empty config section describes one Body.  Loading runs in
 * three passes: (1) count the bodies, (2) load each body and record the
 * name of its primary, (3) resolve each recorded primary name into a
 * Body pointer and append the body to its primary's satellite list.
 *
 * Returns a ralloc'd SolarSystem (everything hangs off it, so one
 * ralloc_free releases it all), or NULL on error / empty file.
 */
static SolarSystem *load_from_config(ALLEGRO_CONFIG *cfg)
{
   SolarSystem *solsys;
   char **primary_names;  /* primary_names[i]: name of body i's primary, or NULL */
   const char *name;
   long num_bodies;
   ALLEGRO_CONFIG_SECTION *sec;
   int i;

   /* First pass: Determine the number of bodies in the file */
   num_bodies = 0;
   sec = NULL;
   while ((name = get_next_config_section(cfg, &sec)) != NULL) {
      /* The unnamed (global) section does not describe a body. */
      if (name[0] != '\0')
         num_bodies++;
   }

   if (num_bodies == 0)
      return NULL; /* Empty solarsystem */

   /* SolarSystem carries its bodies in a trailing array, hence the
    * manual size computation. */
   solsys = ralloc_size(NULL, sizeof(SolarSystem) + num_bodies*sizeof(Body));
   if (solsys == NULL)
      return NULL;

   solsys->num_bodies = num_bodies;

   /* Temporary per-body primary names; parented to solsys so early
    * error paths only need to free solsys. */
   primary_names = ralloc_array(solsys, char *, num_bodies);
   if (primary_names == NULL) {
      ralloc_free(solsys);
      return NULL;
   }

   /* Second pass: Load all celestial bodies */
   i = 0;
   sec = NULL;
   while ((name = get_next_config_section(cfg, &sec)) != NULL && i < num_bodies) {
      if (name[0] == '\0')
         continue;
      solsys->body[i].ctx = solsys;
      if (!load_body(cfg, name, &solsys->body[i], &primary_names[i])) {
         log_err("Couldn't load body %s\n", name);
         ralloc_free(solsys);
         return NULL;
      }
      i++;
   }
   /* Should be impossible: passes 1 and 2 saw different section counts. */
   if (i < num_bodies)
      log_err("Internal consistency error\n");

   /* Third pass: Connect each satellite body to its primary */
   for (i = 0; i < num_bodies; i++) {
      Body *body = &solsys->body[i];
      char *primary_name = primary_names[i];

      if (primary_name == NULL) /* Independent body */
         continue;

      /* Look for the primary */
      body->primary = NULL;
      for (int j = 0; j < num_bodies; j++) {
         Body *body2 = &solsys->body[j];
         if (strcmp(primary_name, body2->name) == 0) {
            body->primary = body2;
            break;
         }
      }
      if (body->primary == NULL) {
         log_err("Couldn't find %s's primary: %s\n", body->name, primary_name);
         ralloc_free(solsys);
         return NULL;
      }

      ralloc_free(primary_name);
      primary_name = NULL; /* Won't ever be used again */

      /* Grow the primary's satellite array by one and append this body. */
      body->primary->num_satellites++;
      body->primary->satellite = reralloc(solsys, body->primary->satellite,
                                          Body *,
                                          body->primary->num_satellites);
      if (body->primary->satellite == NULL) {
         log_err("Out of memory\n");
         ralloc_free(solsys);
         return NULL;
      }

      body->primary->satellite[body->primary->num_satellites - 1] = body;

      /* Derive the orbital period (Kepler's third law) and the orbital
       * plane orientation from the loaded elements.  Assumes load_body
       * already filled SMa/LAN/Inc/APe and the primary's grav_param —
       * TODO(review): confirm against load_body. */
      body->orbit.epoch = 0;
      body->orbit.period = M_TWO_PI * sqrt(CUBE(body->orbit.SMa) / body->primary->grav_param);
      body->orbit.plane_orientation = quat_euler(RAD(body->orbit.LAN), RAD(body->orbit.Inc), RAD(body->orbit.APe));
   }

   ralloc_free(primary_names);

   return solsys;
}
/* simple allocator to carve allocations out of an up-front allocated heap,
 * so that we can free everything easily in one shot.
 */
void * ir3_alloc(struct ir3 *shader, int sz)
{
   /* Currently just defers to ralloc with the shader as parent context:
    * freeing the shader frees every allocation made through here. */
   return ralloc_size(shader, sz);
}
struct program_cache * cache_create(void) { void *local; struct program_cache *cache = NULL; char *path, *max_size_str; uint64_t max_size; int fd = -1; struct stat sb; size_t size; /* A ralloc context for transient data during this invocation. */ local = ralloc_context(NULL); if (local == NULL) goto fail; /* At user request, disable shader cache entirely. */ if (getenv("MESA_GLSL_CACHE_DISABLE")) goto fail; /* Determine path for cache based on the first defined name as follows: * * $MESA_GLSL_CACHE_DIR * $XDG_CACHE_HOME/mesa * <pwd.pw_dir>/.cache/mesa */ path = getenv("MESA_GLSL_CACHE_DIR"); if (path && mkdir_if_needed(path) == -1) { goto fail; } if (path == NULL) { char *xdg_cache_home = getenv("XDG_CACHE_HOME"); if (xdg_cache_home) { if (mkdir_if_needed(xdg_cache_home) == -1) goto fail; path = concatenate_and_mkdir(local, xdg_cache_home, "mesa"); if (path == NULL) goto fail; } } if (path == NULL) { char *buf; size_t buf_size; struct passwd pwd, *result; buf_size = sysconf(_SC_GETPW_R_SIZE_MAX); if (buf_size == -1) buf_size = 512; /* Loop until buf_size is large enough to query the directory */ while (1) { buf = ralloc_size(local, buf_size); getpwuid_r(getuid(), &pwd, buf, buf_size, &result); if (result) break; if (errno == ERANGE) { ralloc_free(buf); buf = NULL; buf_size *= 2; } else { goto fail; } } path = concatenate_and_mkdir(local, pwd.pw_dir, ".cache"); if (path == NULL) goto fail; path = concatenate_and_mkdir(local, path, "mesa"); if (path == NULL) goto fail; } cache = ralloc(NULL, struct program_cache); if (cache == NULL) goto fail; cache->path = ralloc_strdup(cache, path); if (cache->path == NULL) goto fail; path = ralloc_asprintf(local, "%s/index", cache->path); if (path == NULL) goto fail; fd = open(path, O_RDWR | O_CREAT | O_CLOEXEC, 0644); if (fd == -1) goto fail; if (fstat(fd, &sb) == -1) goto fail; /* Force the index file to be the expected size. 
*/ size = sizeof(*cache->size) + CACHE_INDEX_MAX_KEYS * CACHE_KEY_SIZE; if (sb.st_size != size) { if (ftruncate(fd, size) == -1) goto fail; } /* We map this shared so that other processes see updates that we * make. * * Note: We do use atomic addition to ensure that multiple * processes don't scramble the cache size recorded in the * index. But we don't use any locking to prevent multiple * processes from updating the same entry simultaneously. The idea * is that if either result lands entirely in the index, then * that's equivalent to a well-ordered write followed by an * eviction and a write. On the other hand, if the simultaneous * writes result in a corrupt entry, that's not really any * different than both entries being evicted, (since within the * guarantees of the cryptographic hash, a corrupt entry is * unlikely to ever match a real cache key). */ cache->index_mmap = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); if (cache->index_mmap == MAP_FAILED) goto fail; cache->index_mmap_size = size; close(fd); cache->size = (uint64_t *) cache->index_mmap; cache->stored_keys = cache->index_mmap + sizeof(uint64_t); max_size = 0; max_size_str = getenv("MESA_GLSL_CACHE_MAX_SIZE"); if (max_size_str) { char *end; max_size = strtoul(max_size_str, &end, 10); if (end == max_size_str) { max_size = 0; } else { while (*end && isspace(*end)) end++; switch (*end) { case 'K': case 'k': max_size *= 1024; break; case 'M': case 'm': max_size *= 1024*1024; break; case '\0': case 'G': case 'g': default: max_size *= 1024*1024*1024; break; } } } /* Default to 1GB for maximum cache size. */ if (max_size == 0) max_size = 1024*1024*1024; cache->max_size = max_size; ralloc_free(local); return cache; fail: if (fd != -1) close(fd); if (cache) ralloc_free(cache); ralloc_free(local); return NULL; }