/* Allocate & initialize a semaphore */ gx_semaphore_t * /* returns a new semaphore, 0 if error */ gx_semaphore_alloc( gs_memory_t * memory /* memory allocator to use */ ) { gx_semaphore_t *sema; /* sizeof decl'd sema struct, minus semaphore placeholder's size, + actual semaphore size */ unsigned semaSizeof = sizeof(*sema) - sizeof(sema->native) + gp_semaphore_sizeof(); if (gp_semaphore_open(0) == 0) /* see if gp_semaphores are movable */ /* movable */ sema = (gx_semaphore_t *) gs_alloc_bytes(memory, semaSizeof, "gx_semaphore (create)"); else /* unmovable */ sema = (gx_semaphore_t *) gs_alloc_bytes_immovable(memory, semaSizeof, "gx_semaphore (create)"); if (sema == 0) return 0; /* Make sema remember which allocator was used to allocate it */ sema->memory = memory; if (gp_semaphore_open(&sema->native) < 0) { gs_free_object(memory, sema, "gx_semaphore (alloc)"); return 0; } return sema; }
/* Allocate & Init a monitor */ gx_monitor_t * /* returns a new monitor, 0 if error */ gx_monitor_alloc( gs_memory_t * memory /* memory allocator to use */ ) { gx_monitor_t *mon; /* sizeof decl'd mon struct, minus monitor placeholder's size, + actual monitor size */ unsigned monSizeof = sizeof(*mon) - sizeof(mon->native) + gp_monitor_sizeof(); if (gp_monitor_open(0) == 0) /* see if gp_monitors are movable */ /* movable */ mon = (gx_monitor_t *) gs_alloc_bytes(memory, monSizeof, "gx_monitor (create)"); else /* unmovable */ mon = (gx_monitor_t *) gs_alloc_bytes_immovable(memory, monSizeof, "gx_monitor (create)"); if (mon == 0) return 0; /* Make monitor remember which allocator was used to allocate it */ mon->memory = memory; if (gp_monitor_open(&mon->native) < 0) { gs_free_object(memory, mon, "gx_monitor (alloc)"); return 0; } return mon; }
/* Create the retrying and the locked wrapper for the heap allocator. */
int
gs_malloc_wrap(gs_memory_t **wrapped, gs_malloc_memory_t *contents)
{
#  ifdef USE_RETRY_MEMORY_WRAPPER
    /*
     * This is deprecated since 'retry' for clist reversion/cycling
     * will ONLY work for monochrome, simple PS or PCL, not for a
     * color device and not for PDF or XPS with transparency
     */
    {
        gs_memory_retrying_t *rmem;

        /* NOTE(review): 'lmem', 'cmem' and 'code' are not declared in the
         * visible span; when USE_RETRY_MEMORY_WRAPPER is defined they must
         * come from code outside this excerpt (presumably a locked-wrapper
         * built around 'contents' earlier) -- verify against the full file. */
        rmem = (gs_memory_retrying_t *)
            gs_alloc_bytes_immovable((gs_memory_t *)lmem,
                                     sizeof(gs_memory_retrying_t),
                                     "gs_malloc_wrap(retrying)");
        if (rmem == 0) {
            /* Out of memory: unwind the locked wrapper before reporting. */
            gs_memory_locked_release(lmem);
            gs_free_object(cmem, lmem, "gs_malloc_wrap(locked)");
            return_error(gs_error_VMerror);
        }
        code = gs_memory_retrying_init(rmem, (gs_memory_t *)lmem);
        if (code < 0) {
            /* Init failed: free the retrying wrapper, then the locked
             * wrapper, in reverse order of construction. */
            gs_free_object((gs_memory_t *)lmem, rmem,
                           "gs_malloc_wrap(retrying)");
            gs_memory_locked_release(lmem);
            gs_free_object(cmem, lmem, "gs_malloc_wrap(locked)");
            return code;
        }

        *wrapped = (gs_memory_t *)rmem;
    }
#  endif /* retrying */
    /* NOTE(review): when the retry wrapper is disabled, *wrapped is left
     * untouched here -- presumably assigned by the caller or by code
     * outside this excerpt; confirm before relying on it. */
    return 0;
}
/* Initialize the per-instance library context hung off 'mem'.
 * Returns 0 on success (or if already initialized), gs_error_Fatal for a
 * bad allocator, -1 on allocation/initialization failure. */
int gs_lib_ctx_init( gs_memory_t *mem )
{
    gs_lib_ctx_t *pio = 0;

    /* Check the non gc allocator is being passed in */
    if (mem == 0 || mem != mem->non_gc_memory)
        return gs_error_Fatal;

#ifndef GS_THREADSAFE
    mem_err_print = mem;
#endif

    if (mem->gs_lib_ctx) /* one time initialization */
        return 0;

    pio = (gs_lib_ctx_t*)gs_alloc_bytes_immovable(mem,
                                                  sizeof(gs_lib_ctx_t),
                                                  "gs_lib_ctx_init");
    if( pio == 0 )
        return -1;

    /* Wholesale blanking is cheaper than retail, and scales better when new
     * fields are added. */
    memset(pio, 0, sizeof(*pio));
    /* Now set the non zero/false/NULL things */
    pio->memory = mem;
    gs_lib_ctx_get_real_stdio(&pio->fstdin, &pio->fstdout, &pio->fstderr);
    pio->stdin_is_interactive = true;
    /* id's 1 through 4 are reserved for Device color spaces; see gscspace.h */
    pio->gs_next_id = 5;  /* this implies that each thread has its own complete state */

    /* Need to set this before calling gs_lib_ctx_set_icc_directory. */
    mem->gs_lib_ctx = pio;
    /* Initialize our default ICCProfilesDir */
    pio->profiledir = NULL;
    pio->profiledir_len = 0;
    gs_lib_ctx_set_icc_directory(mem, DEFAULT_DIR_ICC, strlen(DEFAULT_DIR_ICC));

    if (gs_lib_ctx_set_default_device_list(mem, gs_dev_defaults,
                                           strlen(gs_dev_defaults)) < 0) {
        gs_free_object(mem, pio, "gs_lib_ctx_init");
        mem->gs_lib_ctx = NULL;
        /* BUG FIX: previously fell through after freeing pio, so
         * gscms_create() below ran against a NULL gs_lib_ctx. */
        return -1;
    }

    /* Initialise the underlying CMS. */
    if (gscms_create(mem)) {
        gs_free_object(mem, mem->gs_lib_ctx->default_device_list,
                       "gs_lib_ctx_fin");
        gs_free_object(mem, pio, "gs_lib_ctx_init");
        mem->gs_lib_ctx = NULL;
        return -1;
    }

    gp_get_realtime(pio->real_time_0);

    return 0;
}
/* Run a file through the interpreter, converting the name to utf8 first
 * when the instance supplies a codepoint decoder.
 * Returns the interpreter's code, or gs_error_Fatal on a bad handle or
 * allocation failure. */
GSDLLEXPORT int GSDLLAPI
gsapi_run_file(void *lib, const char *file_name,
               int user_errors, int *pexit_code)
{
#ifndef GS_NO_UTF8
    char *d, *temp;
    const char *c = file_name;
    char dummy[6];
    int rune, code, len;
#endif
    gs_lib_ctx_t *ctx = (gs_lib_ctx_t *)lib;
    gs_main_instance *minst;

    if (lib == NULL)
        return gs_error_Fatal;
    minst = get_minst_from_memory(ctx->memory);

#ifdef GS_NO_UTF8
    return gs_main_run_file(minst, file_name, user_errors, pexit_code,
                            &(minst->error_object));
#else
    /* Convert the file_name to utf8 */
    if (minst->get_codepoint) {
        /* First pass: measure the utf8-encoded length (+1 for the NUL). */
        len = 1;
        while ((rune = minst->get_codepoint(NULL, &c)) >= 0)
            len += codepoint_to_utf8(dummy, rune);
        temp = (char *)gs_alloc_bytes_immovable(ctx->memory, len,
                                                "gsapi_run_file");
        if (temp == NULL)
            return gs_error_Fatal; /* BUG FIX: was 'return 0' -- reported
                                      success on allocation failure */
        /* Second pass: emit the utf8 bytes. */
        c = file_name;
        d = temp;
        while ((rune = minst->get_codepoint(NULL, &c)) >= 0)
            d += codepoint_to_utf8(d, rune);
        *d = 0;
    }
    else {
        temp = (char *)file_name;
    }
    code = gs_main_run_file(minst, temp, user_errors, pexit_code,
                            &(minst->error_object));
    if (temp != file_name)
        gs_free_object(ctx->memory, temp, "gsapi_run_file");
    return code;
#endif
}
/* Allocate a data block for the JPEG library and record it on the
 * compress-data's tracking list so it can be released later.
 * Returns the data pointer, or 0 if either allocation fails. */
static void *
jpeg_alloc(j_common_ptr cinfo, size_t size, const char *info)
{
    jpeg_compress_data *jcd = cinfo2jcd(cinfo);
    gs_memory_t *mem = jcd->memory;
    jpeg_block_t *block;
    void *payload;

    block = gs_alloc_struct_immovable(mem, jpeg_block_t, &st_jpeg_block,
                                      "jpeg_alloc(block)");
    payload = gs_alloc_bytes_immovable(mem, size, info);
    if (block == 0 || payload == 0) {
        /* Roll back whichever half succeeded (the original code relies on
           gs_free_object tolerating a null pointer here). */
        gs_free_object(mem, payload, info);
        gs_free_object(mem, block, "jpeg_alloc(block)");
        return 0;
    }
    /* Push the new block onto the head of the tracking list. */
    block->data = payload;
    block->next = jcd->blocks;
    jcd->blocks = block;
    return payload;
}
/* Instantiate every entry of i_plugin_table and link the resulting
 * instances onto the context's plugin list.
 * Returns 0 on success, the plugin's error code, or e_Fatal on OOM. */
int
i_plugin_init(i_ctx_t *i_ctx_p)
{
    gs_memory_t *mem_raw = i_ctx_p->memory.current->non_gc_memory;
    const i_plugin_instantiation_proc *proc;
    i_plugin_client_memory client_mem;

    i_plugin_make_memory(&client_mem, mem_raw);
    for (proc = i_plugin_table; *proc != 0; proc++) {
        i_plugin_instance *instance = 0;
        i_plugin_holder *holder;
        int code = (*proc)(&client_mem, &instance);

        if (code != 0)
            return code;
        holder = (i_plugin_holder *)
            gs_alloc_bytes_immovable(mem_raw, sizeof(i_plugin_holder),
                                     "plugin_holder");
        if (holder == 0)
            return_error(e_Fatal);
        /* Prepend the holder to the context's plugin list. */
        holder->I = instance;
        holder->next = i_ctx_p->plugin_list;
        i_ctx_p->plugin_list = holder;
    }
    return 0;
}
/* Initialize the library context hung off 'mem' (one time only).
 * Returns 0 on success or if already initialized, -1 on failure. */
int gs_lib_ctx_init( gs_memory_t *mem )
{
    gs_lib_ctx_t *ctx;

    /* A valid allocator is mandatory. */
    if (mem == 0)
        return -1;

    mem_err_print = mem;

    /* One-time initialization: bail out if a context already exists. */
    if (mem->gs_lib_ctx)
        return 0;

    ctx = (gs_lib_ctx_t *)gs_alloc_bytes_immovable(mem,
                                                   sizeof(gs_lib_ctx_t),
                                                   "gs_lib_ctx_init");
    mem->gs_lib_ctx = ctx;
    if (ctx == 0)
        return -1;

    /* Field-by-field initialization of the fresh context. */
    ctx->memory = mem;
    gs_lib_ctx_get_real_stdio(&ctx->fstdin, &ctx->fstdout, &ctx->fstderr);
    ctx->fstdout2 = NULL;
    ctx->stdout_is_redirected = false;
    ctx->stdout_to_stderr = false;
    ctx->stdin_is_interactive = true;
    ctx->stdin_fn = 0;
    ctx->stdout_fn = 0;
    ctx->stderr_fn = 0;
    ctx->poll_fn = 0;
    ctx->custom_color_callback = NULL;
    /* id's 1 through 4 are reserved for Device color spaces; see gscspace.h */
    ctx->gs_next_id = 5;  /* implies each thread has its own complete state */
    ctx->dict_auto_expand = false;

    return 0;
}
/* Initialize the library context hung off 'mem' (one time only).
 * Returns 0 on success or if already initialized, -1 on failure. */
int gs_lib_ctx_init( gs_memory_t *mem )
{
    gs_lib_ctx_t *ctx;

    /* A valid allocator is mandatory. */
    if (mem == 0)
        return -1;

    mem_err_print = mem;

    /* One-time initialization: bail out if a context already exists. */
    if (mem->gs_lib_ctx)
        return 0;

    ctx = (gs_lib_ctx_t *)gs_alloc_bytes_immovable(mem,
                                                   sizeof(gs_lib_ctx_t),
                                                   "gs_lib_ctx_init");
    mem->gs_lib_ctx = ctx;
    if (ctx == 0)
        return -1;

    /* Wholesale blanking is cheaper than retail, and scales better when
     * new fields are added. */
    memset(ctx, 0, sizeof(*ctx));

    /* Now set the non zero/false/NULL things */
    ctx->memory = mem;
    gs_lib_ctx_get_real_stdio(&ctx->fstdin, &ctx->fstdout, &ctx->fstderr);
    ctx->stdin_is_interactive = true;
    /* id's 1 through 4 are reserved for Device color spaces; see gscspace.h */
    ctx->gs_next_id = 5;  /* implies each thread has its own complete state */

    /* Initialize our default ICCProfilesDir */
    ctx->profiledir = NULL;
    ctx->profiledir_len = 0;
    gs_lib_ctx_set_icc_directory(mem, DEFAULT_DIR_ICC, strlen(DEFAULT_DIR_ICC));

    gp_get_realtime(ctx->real_time_0);

    return 0;
}
/* Create a bandlist allocator: a raw heap (possibly size-capped in debug
 * builds) wrapped in a locking allocator.  On success *final_allocator
 * receives the locked wrapper; on failure everything built so far is
 * torn down and a negative code is returned. */
static int
alloc_bandlist_memory(gs_memory_t ** final_allocator,
                      gs_memory_t * base_allocator)
{
    gs_memory_t *raw_mem = 0;
    gs_memory_locked_t *locked_mem = 0;
    int status = 0;

#if defined(DEBUG) && defined(DebugBandlistMemorySize)
    /* Debug builds may cap the bandlist arena at a fixed size. */
    status = alloc_render_memory(&raw_mem, base_allocator,
                                 DebugBandlistMemorySize);
    if (status < 0)
        return status;
#else
    raw_mem = (gs_memory_t *)gs_malloc_memory_init();
    if (!raw_mem)
        return_error(gs_error_VMerror);
#endif
    locked_mem = (gs_memory_locked_t *)
        gs_alloc_bytes_immovable(raw_mem, sizeof(gs_memory_locked_t),
                                 "alloc_bandlist_memory(locked allocator)");
    if (!locked_mem)
        goto fail;
    status = gs_memory_locked_init(locked_mem, raw_mem);
    if (status < 0)
        goto fail;
    *final_allocator = (gs_memory_t *)locked_mem;
    return 0;

fail:
    /* Tear down in reverse: once the locked wrapper exists, free through
     * it; otherwise release the raw heap directly. */
    if (locked_mem)
        free_bandlist_memory((gs_memory_t *)locked_mem);
    else if (raw_mem)
        gs_memory_free_all(raw_mem, FREE_ALL_EVERYTHING,
                           "alloc_bandlist_memory(data allocator)");
    return (status < 0 ? status : gs_note_error(gs_error_VMerror));
}
/* (de)crypt a section of text--the procedure is the same
 * in each direction. see strimpl.h for return codes.
 *
 * Consumes whole 16-byte AES blocks from pr and writes the CBC-decrypted
 * plaintext to pw.  The first 16 input bytes are taken as the IV.  On the
 * final block, RFC 1423-style padding is stripped when use_padding is set.
 */
static int
s_aes_process(stream_state * ss, stream_cursor_read * pr,
              stream_cursor_write * pw, bool last)
{
    stream_aes_state *const state = (stream_aes_state *) ss;
    const unsigned char *limit;
    const long in_size = pr->limit - pr->ptr;
    const long out_size = pw->limit - pw->ptr;
    unsigned char temp[16];
    int status = 0;

    /* figure out if we're going to run out of space */
    if (in_size > out_size) {
        /* cap reads so we never produce more than out_size bytes */
        limit = pr->ptr + out_size;
        status = 1; /* need more output space */
    } else {
        limit = pr->limit;
        status = last ? EOFC : 0; /* need more input */
    }

    /* set up state and context */
    if (state->ctx == NULL) {
        /* allocate the aes context. this is a public struct but it
           contains internal pointers, so we need to store it separately
           in immovable memory like any opaque structure. */
        state->ctx = (aes_context *)gs_alloc_bytes_immovable(state->memory,
                sizeof(aes_context), "aes context structure");
        if (state->ctx == NULL) {
            gs_throw(gs_error_VMerror,
                     "could not allocate aes context");
            return ERRC;
        }
        if (state->keylength < 1 || state->keylength > SAES_MAX_KEYLENGTH) {
            /* NOTE(review): the freshly allocated ctx is not freed on this
               error path -- presumably reclaimed by the stream's release
               proc; verify against the rest of the file. */
            gs_throw1(gs_error_rangecheck,
                      "invalid aes key length (%d bytes)",
                      state->keylength);
            return ERRC;
        }
        aes_setkey_dec(state->ctx, state->key, state->keylength * 8);
    }
    if (!state->initialized) {
        /* read the initialization vector from the first 16 bytes */
        if (in_size < 16)
            return 0; /* get more data */
        memcpy(state->iv, pr->ptr + 1, 16);
        state->initialized = 1;
        pr->ptr += 16;
    }

    /* decrypt available blocks (cursor convention: ptr points one byte
       BEFORE the next unread/unwritten byte, hence the "+ 1" offsets) */
    while (pr->ptr + 16 <= limit) {
        aes_crypt_cbc(state->ctx, AES_DECRYPT, 16, state->iv,
                      pr->ptr + 1, temp);
        pr->ptr += 16;
        if (last && pr->ptr == pr->limit) {
            /* we're on the last block; unpad if necessary */
            int pad;

            if (state->use_padding) {
                /* we are using RFC 1423-style padding, so the last byte
                   of the plaintext gives the number of bytes to discard */
                pad = temp[15];
                if (pad < 1 || pad > 16) {
                    gs_throw1(gs_error_rangecheck,
                              "invalid aes padding byte (0x%02x)",
                              (unsigned char)pad);
                    return ERRC;
                }
            } else {
                /* not using padding */
                pad = 0;
            }
            memcpy(pw->ptr + 1, temp, 16 - pad);
            pw->ptr += 16 - pad;
            return EOFC;
        }
        memcpy(pw->ptr + 1, temp, 16);
        pw->ptr += 16;
    }

    /* if we got to the end of the file without triggering the padding
       check, the input must not have been a multiple of 16 bytes long.
       complain. */
    if (status == EOFC) {
        gs_throw(gs_error_rangecheck,
                 "aes stream isn't a multiple of 16 bytes");
        return ERRC;
    }

    return status;
}