// returns ord or -1 int pvm_get_method_ordinal( pvm_object_t tclass, pvm_object_t mname ) { struct data_area_4_class *cda= pvm_object_da( tclass, class ); pvm_object_t mnames = cda->method_names; if( pvm_is_null(mnames) ) return -1; if( pvm_is_null(mname) ) return -1; if( !pvm_object_class_exactly_is( mname, pvm_get_string_class() ) ) return -1; int nitems = get_array_size( mnames.data ); int i; for( i = 0; i < nitems; i++ ) { pvm_object_t curr_mname = pvm_get_ofield( mnames, i ); int diff = pvm_strcmp( curr_mname, mname); if( diff == 0 ) return i; } return -1; }
// Validates that in-memory texture state is coherent: the header is sane,
// the cached block/pixel metrics agree with what the header's GL format
// implies, and every stored image has exactly the byte size its mip
// dimensions demand. Returns false on the first inconsistency found.
bool ktx_texture::consistency_check() const
{
    if (!check_header())
        return false;

    uint32_t block_dim = 0, bytes_per_block = 0;

    if ((!m_header.m_glType) || (!m_header.m_glFormat))
    {
        // Compressed path: glType and glFormat must BOTH be zero here;
        // exactly one of them zero is a malformed header.
        if ((m_header.m_glType) || (m_header.m_glFormat))
            return false;
        if (!ktx_get_ogl_fmt_desc(m_header.m_glInternalFormat, m_header.m_glType, block_dim, bytes_per_block))
            return false;
        // A 1x1 "block" would mean the internal format is not actually compressed.
        if (block_dim == 1)
            return false;
        //if ((get_width() % block_dim) || (get_height() % block_dim))
        //   return false;
    }
    else
    {
        // Uncompressed path: the format must resolve, and must not report a
        // block size (which would contradict non-zero glType/glFormat).
        if (!ktx_get_ogl_fmt_desc(m_header.m_glFormat, m_header.m_glType, block_dim, bytes_per_block))
            return false;
        if (block_dim > 1)
            return false;
    }

    // Cached members must match the values re-derived from the header.
    if ((m_block_dim != block_dim) || (m_bytes_per_block != bytes_per_block))
        return false;

    uint32_t total_expected_images = get_total_images();
    if (m_image_data.size() != total_expected_images)
        return false;

    for (uint32_t mip_level = 0; mip_level < get_num_mips(); mip_level++)
    {
        uint32_t mip_width, mip_height, mip_depth;
        get_mip_dim(mip_level, mip_width, mip_height, mip_depth);

        // Round the mip dimensions up to whole blocks.
        const uint32_t mip_row_blocks = (mip_width + m_block_dim - 1) / m_block_dim;
        const uint32_t mip_col_blocks = (mip_height + m_block_dim - 1) / m_block_dim;
        if ((!mip_row_blocks) || (!mip_col_blocks))
            return false;

        for (uint32_t array_element = 0; array_element < get_array_size(); array_element++)
        {
            for (uint32_t face = 0; face < get_num_faces(); face++)
            {
                for (uint32_t zslice = 0; zslice < mip_depth; zslice++)
                {
                    // Every stored slice must be exactly one mip's worth of blocks.
                    const uint8_vec &image_data = get_image_data(get_image_index(mip_level, array_element, face, zslice));

                    uint32_t expected_image_size = mip_row_blocks * mip_col_blocks * m_bytes_per_block;
                    if (image_data.size() != expected_image_size)
                        return false;
                }
            }
        }
    }

    return true;
}
/**
 * Number of named fields recorded for a class.
 * Returns 0 when the class carries no field-name table at all.
 */
int pvm_get_field_name_count( pvm_object_t tclass )
{
    struct data_area_4_class *class_da = pvm_object_da( tclass, class );
    pvm_object_t name_table = class_da->field_names;

    return pvm_is_null(name_table) ? 0 : get_array_size( name_table.data );
}
// returns a valarray of indices reprsented by the generalized slice static std::valarray<std::size_t> get_index_array (const std::gslice &gsl) { const std::size_t size = get_array_size (gsl); std::valarray<std::size_t> indices (size); std::valarray<std::size_t> tmpstore; for (std::size_t i = 0; i != size; ++i) indices [i] = next_index (gsl, tmpstore); return indices; }
/*
** Allocate the client's socket structure and reset the client state to
** its defaults: no team, localhost, port 0, and a fresh IA table whose
** size is cached in it->size.
** error("malloc") is invoked when the allocation fails — presumably it
** terminates the program (TODO: confirm; otherwise NULL would escape).
** Returns the freshly allocated (otherwise uninitialized) t_socket.
*/
t_socket *init_client(t_client *it)
{
    t_socket *sock;

    /* Was a conditional expression abused as a statement; an explicit
    ** if-statement states the intent and doesn't depend on error()'s
    ** return type. */
    sock = malloc(sizeof(t_socket));
    if (!sock)
        error("malloc");
    it->team = NULL;
    it->host = LOCALHOST;
    it->port = 0;
    it->ia = init_ia();
    it->size = get_array_size(it->ia);
    it->fdmax = NULL;
    it->rfds = NULL;
    return (sock);
}
/**
 * Start a VM thread for every entry in the image's global thread list,
 * walking from the last slot down to slot 0, then flag that all threads
 * have been started.
 */
void activate_all_threads()
{
    int count = get_array_size(pvm_root.threads_list.data);

    if( count == 0 )
        SHOW_ERROR0( 0, "There are 0 live threads in image, system must be dead :(" );

    SHOW_FLOW( 3, "Activating %d threads", count);

    int slot;
    for( slot = count - 1; slot >= 0; slot-- )
    {
        struct pvm_object th = pvm_get_array_ofield(pvm_root.threads_list.data, slot );
        pvm_check_is_thread( th );   // validates the slot really holds a thread object
        start_new_vm_thread( th );
    }

    all_threads_started = 1;
}
uint32_t ktx_texture::get_total_images() const { if (!is_valid() || !get_num_mips()) return 0; // bogus: //return get_num_mips() * (get_depth() * get_num_faces() * get_array_size()); // Naive algorithm, could just compute based off the # of mips uint32_t max_index = 0; for (uint32_t mip_level = 0; mip_level < get_num_mips(); mip_level++) { uint32_t total_zslices = math::maximum<uint32_t>(get_depth() >> mip_level, 1U); uint32_t index = get_image_index(mip_level, get_array_size() - 1, get_num_faces() - 1, total_zslices - 1); max_index = math::maximum<uint32_t>(max_index, index); } return max_index + 1; }
/**
 * Drop the thread whose object storage is 'os' from the image's global
 * thread list by overwriting its slot(s) with a null object. All slots
 * are scanned; a diagnostic is printed when the number of replaced slots
 * is anything other than exactly one.
 */
static void remove_vm_thread_from_list(pvm_object_storage_t *os)
{
    // TODO check that it is a thread
    int count = get_array_size(pvm_root.threads_list.data);

    if( !count )
        SHOW_ERROR0( 0, "There were 0 live threads in image, and some thread is dead. Now -1?" );

    int removed = 0;
    int slot;
    for( slot = count - 1; slot >= 0; slot-- )
    {
        struct pvm_object th = pvm_get_array_ofield(pvm_root.threads_list.data, slot );
        pvm_check_is_thread( th );

        if( th.data == os )
        {
            pvm_set_array_ofield(pvm_root.threads_list.data, slot, pvm_create_null_object() );
            removed++;
        }
    }

    if(1 != removed)
        printf("Nkill = %d\n", removed);
}
/* Standard svn test program.
   Parses harness options, then either lists the registered tests, runs the
   test numbers named on the command line, or runs every test. The exit
   status is nonzero iff any test failed. */
int main(int argc, const char *argv[])
{
  const char *prog_name;
  int i;
  svn_boolean_t got_error = FALSE;   /* becomes TRUE if any test fails */
  apr_pool_t *pool, *test_pool;
  svn_boolean_t ran_a_test = FALSE;
  svn_boolean_t list_mode = FALSE;
  int opt_id;
  apr_status_t apr_err;
  apr_getopt_t *os;
  svn_error_t *err;
  char errmsg[200];
  /* How many tests are there? */
  int array_size = get_array_size();

  svn_test_opts_t opts = { NULL };

  opts.fs_type = DEFAULT_FS_TYPE;

  /* Initialize APR (Apache pools) */
  if (apr_initialize() != APR_SUCCESS)
    {
      printf("apr_initialize() failed.\n");
      exit(1);
    }

  /* set up the global pool. Use a separate allocator to limit memory
   * usage but make it thread-safe to allow for multi-threaded tests.
   */
  pool = apr_allocator_owner_get(svn_pool_create_allocator(TRUE));

  /* Remember the command line */
  test_argc = argc;
  test_argv = argv;

  err = svn_cmdline__getopt_init(&os, argc, argv, pool);

  os->interleave = TRUE; /* Let options and arguments be interleaved */

  /* Strip off any leading path components from the program name. */
  prog_name = strrchr(argv[0], '/');
  if (prog_name)
    prog_name++;
  else
    {
      /* Just check if this is that weird platform that uses \ instead
         of / for the path separator. */
      prog_name = strrchr(argv[0], '\\');
      if (prog_name)
        prog_name++;
      else
        prog_name = argv[0];
    }

  if (err)
    return svn_cmdline_handle_exit_error(err, pool, prog_name);

  /* Option loop: runs until getopt reports EOF. Unknown options
     (APR_BADCH) are deliberately tolerated so arbitrary flags can be
     passed through to individual tests. */
  while (1)
    {
      const char *opt_arg;

      /* Parse the next option. */
      apr_err = apr_getopt_long(os, cl_options, &opt_id, &opt_arg);
      if (APR_STATUS_IS_EOF(apr_err))
        break;
      else if (apr_err && (apr_err != APR_BADCH))
        {
          /* Ignore invalid option error to allow passing arbitary options */
          fprintf(stderr, "apr_getopt_long failed : [%d] %s\n",
                  apr_err, apr_strerror(apr_err, errmsg, sizeof(errmsg)));
          exit(1);
        }

      switch (opt_id)
        {
        case cleanup_opt:
          cleanup_mode = TRUE;
          break;
        case config_opt:
          opts.config_file = apr_pstrdup(pool, opt_arg);
          break;
        case fstype_opt:
          opts.fs_type = apr_pstrdup(pool, opt_arg);
          break;
        case list_opt:
          list_mode = TRUE;
          break;
        case mode_filter_opt:
          /* Restrict the run to tests whose declared mode matches. */
          if (svn_cstring_casecmp(opt_arg, "PASS") == 0)
            mode_filter = svn_test_pass;
          else if (svn_cstring_casecmp(opt_arg, "XFAIL") == 0)
            mode_filter = svn_test_xfail;
          else if (svn_cstring_casecmp(opt_arg, "SKIP") == 0)
            mode_filter = svn_test_skip;
          else if (svn_cstring_casecmp(opt_arg, "ALL") == 0)
            mode_filter = svn_test_all;
          else
            {
              fprintf(stderr, "FAIL: Invalid --mode-filter option. Try ");
              fprintf(stderr, " PASS, XFAIL, SKIP or ALL.\n");
              exit(1);
            }
          break;
        case verbose_opt:
          verbose_mode = TRUE;
          break;
        case quiet_opt:
          quiet_mode = TRUE;
          break;
        case allow_segfault_opt:
          allow_segfaults = TRUE;
          break;
        case server_minor_version_opt:
          {
            char *end;
            opts.server_minor_version = (int) strtol(opt_arg, &end, 10);
            if (end == opt_arg || *end != '\0')
              {
                fprintf(stderr, "FAIL: Non-numeric minor version given\n");
                exit(1);
              }
            /* Only server minor versions 3..6 are supported here. */
            if ((opts.server_minor_version < 3) || (opts.server_minor_version > 6))
              {
                fprintf(stderr, "FAIL: Invalid minor version given\n");
                exit(1);
              }
          }
        }
    }

  /* Disable sleeping for timestamps, to speed up the tests. */
  apr_env_set(
      "SVN_I_LOVE_CORRUPTED_WORKING_COPIES_SO_DISABLE_SLEEP_FOR_TIMESTAMPS",
      "yes", pool);

  /* You can't be both quiet and verbose. */
  if (quiet_mode && verbose_mode)
    {
      fprintf(stderr, "FAIL: --verbose and --quiet are mutually exclusive\n");
      exit(1);
    }

  /* Create an iteration pool for the tests */
  cleanup_pool = svn_pool_create(pool);
  test_pool = svn_pool_create(pool);

  if (!allow_segfaults)
    svn_error_set_malfunction_handler(svn_error_raise_on_malfunction);

  if (argc >= 2) /* notice command-line arguments */
    {
      if (! strcmp(argv[1], "list") || list_mode)
        {
          const char *header_msg;
          ran_a_test = TRUE;

          /* run all tests with MSG_ONLY set to TRUE */
          header_msg = "Test # Mode Test Description\n"
                       "------ ----- ----------------\n";
          for (i = 1; i <= array_size; i++)
            {
              if (do_test_num(prog_name, i, TRUE, &opts, &header_msg, test_pool))
                got_error = TRUE;

              /* Clear the per-function pool */
              svn_pool_clear(test_pool);
              svn_pool_clear(cleanup_pool);
            }
        }
      else
        {
          /* Run only the test numbers given on the command line;
             non-numeric arguments are skipped as option noise. */
          for (i = 1; i < argc; i++)
            {
              if (svn_ctype_isdigit(argv[i][0]) || argv[i][0] == '-')
                {
                  int test_num = atoi(argv[i]);
                  if (test_num == 0)
                    /* A --option argument, most likely. */
                    continue;

                  ran_a_test = TRUE;
                  if (do_test_num(prog_name, test_num, FALSE, &opts, NULL, test_pool))
                    got_error = TRUE;

                  /* Clear the per-function pool */
                  svn_pool_clear(test_pool);
                  svn_pool_clear(cleanup_pool);
                }
            }
        }
    }

  if (! ran_a_test)
    {
      /* just run all tests */
      for (i = 1; i <= array_size; i++)
        {
          if (do_test_num(prog_name, i, FALSE, &opts, NULL, test_pool))
            got_error = TRUE;

          /* Clear the per-function pool */
          svn_pool_clear(test_pool);
          svn_pool_clear(cleanup_pool);
        }
    }

  /* Clean up APR */
  svn_pool_destroy(pool); /* takes test_pool with it */
  apr_terminate();

  return got_error;
}
/* Execute a test number TEST_NUM. Pretty-print test name and dots
   according to our test-suite spec, and return the result code.
   If HEADER_MSG and *HEADER_MSG are not NULL, print *HEADER_MSG prior to
   pretty-printing the test information, then set *HEADER_MSG to NULL.
   Returns TRUE iff the test produced an unexpected result (FAIL/XPASS). */
static svn_boolean_t do_test_num(const char *progname,
                                 int test_num,
                                 svn_boolean_t msg_only,
                                 svn_test_opts_t *opts,
                                 const char **header_msg,
                                 apr_pool_t *pool)
{
  svn_boolean_t skip, xfail, wimp;
  svn_error_t *err = NULL;
  svn_boolean_t test_failed;
  const char *msg = NULL; /* the message this individual test prints out */
  const struct svn_test_descriptor_t *desc;
  const int array_size = get_array_size();
  svn_boolean_t run_this_test; /* This test's mode matches DESC->MODE. */

  /* Check our array bounds! Negative numbers count back from the end. */
  if (test_num < 0)
    test_num += array_size + 1;
  if ((test_num > array_size) || (test_num <= 0))
    {
      if (header_msg && *header_msg)
        printf("%s", *header_msg);
      printf("FAIL: %s: THERE IS NO TEST NUMBER %2d\n", progname, test_num);
      skip_cleanup = TRUE;
      return TRUE; /* BAIL, this test number doesn't exist. */
    }

  desc = &test_funcs[test_num];
  skip = desc->mode == svn_test_skip;
  xfail = desc->mode == svn_test_xfail;
  wimp = xfail && desc->wip;   /* xfail that is work-in-progress */
  msg = desc->msg;
  run_this_test = mode_filter == svn_test_all || mode_filter == desc->mode;

  if (run_this_test && header_msg && *header_msg)
    {
      printf("%s", *header_msg);
      *header_msg = NULL;
    }

  if (!allow_segfaults)
    {
      /* Catch a crashing test, so we don't interrupt the rest of 'em. */
      apr_signal(SIGSEGV, crash_handler);
    }

  /* We use setjmp/longjmp to recover from the crash. setjmp() essentially
     establishes a rollback point, and longjmp() goes back to that point.
     When we invoke longjmp(), it instructs setjmp() to return non-zero, so
     we don't end up in an infinite loop.

     If we've got non-zero from setjmp(), we know we've crashed. */
  if (setjmp(jump_buffer) == 0)
    {
      /* Do test */
      if (msg_only || skip || !run_this_test)
        ; /* pass */
      else if (desc->func2)
        err = (*desc->func2)(pool);
      else
        err = (*desc->func_opts)(opts, pool);

      /* A test may report "skipped" at runtime; treat it as a skip, not
         an error. */
      if (err && err->apr_err == SVN_ERR_TEST_SKIPPED)
        {
          svn_error_clear(err);
          err = SVN_NO_ERROR;
          skip = TRUE;
        }
    }
  else
    err = svn_error_create(SVN_ERR_TEST_FAILED, NULL,
                           "Test crashed "
                           "(run in debugger with '--allow-segfaults')");

  if (!allow_segfaults)
    {
      /* Now back to your regularly scheduled program... */
      apr_signal(SIGSEGV, SIG_DFL);
    }

  /* Failure means unexpected results -- FAIL or XPASS. */
  test_failed = (!wimp && ((err != SVN_NO_ERROR) != (xfail != 0)));

  /* If we got an error, print it out. */
  if (err)
    {
      svn_handle_error2(err, stdout, FALSE, "svn_tests: ");
      svn_error_clear(err);
    }

  if (msg_only)
    {
      if (run_this_test)
        printf(" %3d %-5s %s%s%s%s\n",
               test_num,
               (xfail ? "XFAIL" : (skip ? "SKIP" : "")),
               msg ? msg : "(test did not provide name)",
               (wimp && verbose_mode) ? " [[" : "",
               (wimp && verbose_mode) ? desc->wip : "",
               (wimp && verbose_mode) ? "]]" : "");
    }
  else if (run_this_test && ((! quiet_mode) || test_failed))
    {
      /* NOTE(review): the argument list below appears corrupted — the
         `"XPASS:"******"SKIP: "` and `"PASS: "******` runs look like a
         scrubbing artifact that replaced real tokens with `*`s. Restore
         this printf from the upstream svn test harness before building. */
      printf("%s %s %d: %s%s%s%s\n",
             (err ? (xfail ? "XFAIL:" : "FAIL: ")
                  : (xfail ? "XPASS:"******"SKIP: " : "PASS: "******"(test did not provide name)",
             wimp ? " [[WIMP: " : "",
             wimp ? desc->wip : "",
             wimp ? "]]" : "");
    }

  /* Lint the test's docstring against the suite's style rules. */
  if (msg)
    {
      size_t len = strlen(msg);
      if (len > 50)
        printf("WARNING: Test docstring exceeds 50 characters\n");
      if (msg[len - 1] == '.')
        printf("WARNING: Test docstring ends in a period (.)\n");
      if (svn_ctype_isupper(msg[0]))
        printf("WARNING: Test docstring is capitalized\n");
    }
  if (desc->msg == NULL)
    printf("WARNING: New-style test descriptor is missing a docstring.\n");

  fflush(stdout);

  skip_cleanup = test_failed;

  return test_failed;
}
/// Fetch the element at @p index of an unnamed-array expression,
/// wrapped as a new Expression over the same namespace.
/// Preconditions (asserted): this is an unnamed array and the index
/// is within bounds.
Expression Expression::get_array_element(const size_t& index) const
{
    assert( is_unnamed_array() );
    assert( index < get_array_size() );

    const auto &element = _expr.operands().at(index);
    return Expression(element, _ns);
}
// Serializes the texture as a KTX byte stream: header, optional key/value
// metadata, then per-mip imageSize fields followed by the image payloads,
// applying the KTX cubePadding/mipPadding rules. 'no_keyvalue_data'
// suppresses the key/value block entirely. When m_opposite_endianness is
// set, the header, size fields and texel data are byte-swapped on the way
// out. Returns false on any inconsistency or serializer write failure.
// NOTE(review): this const method assigns to m_header fields — presumably
// m_header is declared mutable; confirm before refactoring.
bool ktx_texture::write_to_stream(data_stream_serializer &serializer, bool no_keyvalue_data) const
{
    if (!consistency_check())
    {
        VOGL_ASSERT_ALWAYS;
        return false;
    }

    memcpy(m_header.m_identifier, s_ktx_file_id, sizeof(m_header.m_identifier));
    m_header.m_endianness = m_opposite_endianness ? KTX_OPPOSITE_ENDIAN : KTX_ENDIAN;

    if (m_block_dim == 1)
    {
        // Uncompressed: derive type size and base internal format from the header.
        m_header.m_glTypeSize = ktx_get_ogl_type_size(m_header.m_glType);
        m_header.m_glBaseInternalFormat = m_header.m_glFormat;
    }
    else
    {
        m_header.m_glBaseInternalFormat = ktx_get_ogl_compressed_base_internal_fmt(m_header.m_glInternalFormat);
    }

    // Each key/value entry costs a uint32 length plus its data rounded up
    // to a 4-byte boundary.
    m_header.m_bytesOfKeyValueData = 0;
    if (!no_keyvalue_data)
    {
        for (uint32_t i = 0; i < m_key_values.size(); i++)
            m_header.m_bytesOfKeyValueData += sizeof(uint32_t) + ((m_key_values[i].size() + 3) & ~3);
    }

    // Swap the header only for the duration of the write, then restore it.
    if (m_opposite_endianness)
        m_header.endian_swap();

    bool success = (serializer.write(&m_header, sizeof(m_header), 1) == 1);

    if (m_opposite_endianness)
        m_header.endian_swap();

    if (!success)
        return success;

    uint32_t total_key_value_bytes = 0;
    const uint8_t padding[3] = { 0, 0, 0 };

    if (!no_keyvalue_data)
    {
        for (uint32_t i = 0; i < m_key_values.size(); i++)
        {
            uint32_t key_value_size = m_key_values[i].size();

            // Swap the length just for the write, then swap it back for
            // the bookkeeping below.
            if (m_opposite_endianness)
                key_value_size = utils::swap32(key_value_size);

            success = (serializer.write(&key_value_size, sizeof(key_value_size), 1) == 1);
            total_key_value_bytes += sizeof(key_value_size);

            if (m_opposite_endianness)
                key_value_size = utils::swap32(key_value_size);

            if (!success)
                return false;

            if (key_value_size)
            {
                if (serializer.write(&m_key_values[i][0], key_value_size, 1) != 1)
                    return false;
                total_key_value_bytes += key_value_size;

                // Pad each entry up to the next 4-byte boundary.
                uint32_t num_padding = 3 - ((key_value_size + 3) % 4);
                if ((num_padding) && (serializer.write(padding, num_padding, 1) != 1))
                    return false;
                total_key_value_bytes += num_padding;
            }
        }
        (void)total_key_value_bytes;
    }

    VOGL_ASSERT(total_key_value_bytes == m_header.m_bytesOfKeyValueData);

    for (uint32_t mip_level = 0; mip_level < get_num_mips(); mip_level++)
    {
        uint32_t mip_width, mip_height, mip_depth;
        get_mip_dim(mip_level, mip_width, mip_height, mip_depth);

        // Round the mip dimensions up to whole blocks.
        const uint32_t mip_row_blocks = (mip_width + m_block_dim - 1) / m_block_dim;
        const uint32_t mip_col_blocks = (mip_height + m_block_dim - 1) / m_block_dim;
        if ((!mip_row_blocks) || (!mip_col_blocks))
            return false;

        // imageSize: per the KTX spec, for a plain (non-array) cubemap this
        // is the size of a SINGLE face; otherwise it covers the whole mip.
        uint32_t image_size = mip_row_blocks * mip_col_blocks * m_bytes_per_block;

        if ((m_header.m_numberOfArrayElements) || (get_num_faces() == 1))
            image_size *= (get_array_size() * get_num_faces() * mip_depth);

        if (!image_size)
        {
            VOGL_ASSERT_ALWAYS;
            return false;
        }

        if (m_opposite_endianness)
            image_size = utils::swap32(image_size);

        success = (serializer.write(&image_size, sizeof(image_size), 1) == 1);

        if (m_opposite_endianness)
            image_size = utils::swap32(image_size);

        if (!success)
            return false;

        uint32_t total_mip_size = 0;
        uint32_t total_image_data_size = 0;

        if ((!m_header.m_numberOfArrayElements) && (get_num_faces() == 6))
        {
            // plain non-array cubemap
            for (uint32_t face = 0; face < get_num_faces(); face++)
            {
                const uint8_vec &image_data = get_image_data(get_image_index(mip_level, 0, face, 0));
                if ((!image_data.size()) || (image_data.size() != image_size))
                    return false;

                // Swap texel data into a scratch copy so the stored image
                // stays in native byte order.
                if (m_opposite_endianness)
                {
                    uint8_vec tmp_image_data(image_data);
                    utils::endian_swap_mem(&tmp_image_data[0], tmp_image_data.size(), m_header.m_glTypeSize);
                    if (serializer.write(&tmp_image_data[0], tmp_image_data.size(), 1) != 1)
                        return false;
                }
                else if (serializer.write(&image_data[0], image_data.size(), 1) != 1)
                    return false;

                // Not +=, but =, because of the silly image_size plain cubemap exception in the KTX file format
                total_image_data_size = image_data.size();

                // cubePadding: each face is padded to a 4-byte boundary.
                uint32_t num_cube_pad_bytes = 3 - ((image_data.size() + 3) % 4);
                if ((num_cube_pad_bytes) && (serializer.write(padding, num_cube_pad_bytes, 1) != 1))
                    return false;

                total_mip_size += image_size + num_cube_pad_bytes;
            }
        }
        else
        {
            // 1D, 2D, 3D (normal or array texture), or array cubemap
            for (uint32_t array_element = 0; array_element < get_array_size(); array_element++)
            {
                for (uint32_t face = 0; face < get_num_faces(); face++)
                {
                    for (uint32_t zslice = 0; zslice < mip_depth; zslice++)
                    {
                        const uint8_vec &image_data = get_image_data(get_image_index(mip_level, array_element, face, zslice));
                        if (!image_data.size())
                            return false;

                        if (m_opposite_endianness)
                        {
                            uint8_vec tmp_image_data(image_data);
                            utils::endian_swap_mem(&tmp_image_data[0], tmp_image_data.size(), m_header.m_glTypeSize);
                            if (serializer.write(&tmp_image_data[0], tmp_image_data.size(), 1) != 1)
                                return false;
                        }
                        else if (serializer.write(&image_data[0], image_data.size(), 1) != 1)
                            return false;

                        total_image_data_size += image_data.size();

                        total_mip_size += image_data.size();
                    }
                }
            }

            // mipPadding: round the whole mip level up to a 4-byte boundary.
            uint32_t num_mip_pad_bytes = 3 - ((total_mip_size + 3) % 4);
            if ((num_mip_pad_bytes) && (serializer.write(padding, num_mip_pad_bytes, 1) != 1))
                return false;
            total_mip_size += num_mip_pad_bytes;
        }

        VOGL_ASSERT((total_mip_size & 3) == 0);
        VOGL_ASSERT(total_image_data_size == image_size);
    }

    return true;
}
// Parses a KTX byte stream into this object: header (with endianness
// detection and optional byte-swapping), key/value metadata, then every
// mip level's imageSize field and image payloads, honoring the KTX
// cubePadding/mipPadding rules. Includes workarounds (behind
// VOGL_KTX_PVRTEX_WORKAROUNDS) for PVRTexTool's broken writers: missing
// format fields and missing imageSize fields. Returns false (leaving the
// object cleared/partially filled) on any malformed input or short read.
bool ktx_texture::read_from_stream(data_stream_serializer &serializer)
{
    clear();

    // Read header
    if (serializer.read(&m_header, 1, sizeof(m_header)) != sizeof(ktx_header))
        return false;

    // Check header
    if (memcmp(s_ktx_file_id, m_header.m_identifier, sizeof(m_header.m_identifier)))
        return false;

    if ((m_header.m_endianness != KTX_OPPOSITE_ENDIAN) && (m_header.m_endianness != KTX_ENDIAN))
        return false;

    m_opposite_endianness = (m_header.m_endianness == KTX_OPPOSITE_ENDIAN);
    if (m_opposite_endianness)
    {
        // Bring the header into native byte order; only 1/2/4-byte texel
        // component sizes can be swapped meaningfully later.
        m_header.endian_swap();

        if ((m_header.m_glTypeSize != sizeof(uint8_t)) && (m_header.m_glTypeSize != sizeof(uint16_t)) && (m_header.m_glTypeSize != sizeof(uint32_t)))
            return false;
    }

    if (!check_header())
        return false;

    if (!compute_pixel_info())
    {
#if VOGL_KTX_PVRTEX_WORKAROUNDS
        // rg [9/10/13] - moved this check into here, instead of in compute_pixel_info(), but need to retest it.
        if ((!m_header.m_glInternalFormat) && (!m_header.m_glType) && (!m_header.m_glTypeSize) && (!m_header.m_glBaseInternalFormat))
        {
            // PVRTexTool writes bogus headers when outputting ETC1.
            console::warning("ktx_texture::compute_pixel_info: Header doesn't specify any format, assuming ETC1 and hoping for the best\n");
            m_header.m_glBaseInternalFormat = KTX_RGB;
            m_header.m_glInternalFormat = KTX_ETC1_RGB8_OES;
            m_header.m_glTypeSize = 1;
            m_block_dim = 4;
            m_bytes_per_block = 8;
        }
        else
#endif
            return false;
    }

    uint8_t pad_bytes[3];

    // Read the key value entries: each is a uint32 byte count, the data,
    // then padding up to a 4-byte boundary; all accounted against the
    // header's m_bytesOfKeyValueData budget.
    uint32_t num_key_value_bytes_remaining = m_header.m_bytesOfKeyValueData;
    while (num_key_value_bytes_remaining)
    {
        if (num_key_value_bytes_remaining < sizeof(uint32_t))
            return false;

        uint32_t key_value_byte_size;
        if (serializer.read(&key_value_byte_size, 1, sizeof(uint32_t)) != sizeof(uint32_t))
            return false;

        num_key_value_bytes_remaining -= sizeof(uint32_t);

        if (m_opposite_endianness)
            key_value_byte_size = utils::swap32(key_value_byte_size);

        if (key_value_byte_size > num_key_value_bytes_remaining)
            return false;

        uint8_vec key_value_data;
        if (key_value_byte_size)
        {
            key_value_data.resize(key_value_byte_size);
            if (serializer.read(&key_value_data[0], 1, key_value_byte_size) != key_value_byte_size)
                return false;
        }

        m_key_values.push_back(key_value_data);

        uint32_t padding = 3 - ((key_value_byte_size + 3) % 4);
        if (padding)
        {
            if (serializer.read(pad_bytes, 1, padding) != padding)
                return false;
        }

        num_key_value_bytes_remaining -= key_value_byte_size;
        if (num_key_value_bytes_remaining < padding)
            return false;
        num_key_value_bytes_remaining -= padding;
    }

    // Now read the mip levels; sanity-cap the total image count first.
    uint32_t total_faces = get_num_mips() * get_array_size() * get_num_faces() * get_depth();
    if ((!total_faces) || (total_faces > 65535))
        return false;

// See Section 2.8 of KTX file format: No rounding to block sizes should be applied for block compressed textures.
// OK, I'm going to break that rule otherwise KTX can only store a subset of textures that DDS can handle for no good reason.
#if 0
    const uint32_t mip0_row_blocks = m_header.m_pixelWidth / m_block_dim;
    const uint32_t mip0_col_blocks = VOGL_MAX(1, m_header.m_pixelHeight) / m_block_dim;
#else
    const uint32_t mip0_row_blocks = (m_header.m_pixelWidth + m_block_dim - 1) / m_block_dim;
    const uint32_t mip0_col_blocks = (VOGL_MAX(1, m_header.m_pixelHeight) + m_block_dim - 1) / m_block_dim;
#endif
    if ((!mip0_row_blocks) || (!mip0_col_blocks))
        return false;

    const uint32_t mip0_depth = VOGL_MAX(1, m_header.m_pixelDepth);
    VOGL_NOTE_UNUSED(mip0_depth);

    bool has_valid_image_size_fields = true;
    bool disable_mip_and_cubemap_padding = false;

#if VOGL_KTX_PVRTEX_WORKAROUNDS
    {
        // PVRTexTool has a bogus KTX writer that doesn't write any imageSize fields. Nice.
        // Pre-compute how many bytes a spec-conforming payload would need;
        // if the stream is shorter than that, assume the fields are absent.
        size_t expected_bytes_remaining = 0;
        for (uint32_t mip_level = 0; mip_level < get_num_mips(); mip_level++)
        {
            uint32_t mip_width, mip_height, mip_depth;
            get_mip_dim(mip_level, mip_width, mip_height, mip_depth);

            const uint32_t mip_row_blocks = (mip_width + m_block_dim - 1) / m_block_dim;
            const uint32_t mip_col_blocks = (mip_height + m_block_dim - 1) / m_block_dim;
            if ((!mip_row_blocks) || (!mip_col_blocks))
                return false;

            expected_bytes_remaining += sizeof(uint32_t);

            if ((!m_header.m_numberOfArrayElements) && (get_num_faces() == 6))
            {
                for (uint32_t face = 0; face < get_num_faces(); face++)
                {
                    uint32_t slice_size = mip_row_blocks * mip_col_blocks * m_bytes_per_block;
                    expected_bytes_remaining += slice_size;

                    uint32_t num_cube_pad_bytes = 3 - ((slice_size + 3) % 4);
                    expected_bytes_remaining += num_cube_pad_bytes;
                }
            }
            else
            {
                uint32_t total_mip_size = 0;
                for (uint32_t array_element = 0; array_element < get_array_size(); array_element++)
                {
                    for (uint32_t face = 0; face < get_num_faces(); face++)
                    {
                        for (uint32_t zslice = 0; zslice < mip_depth; zslice++)
                        {
                            uint32_t slice_size = mip_row_blocks * mip_col_blocks * m_bytes_per_block;
                            total_mip_size += slice_size;
                        }
                    }
                }
                expected_bytes_remaining += total_mip_size;

                uint32_t num_mip_pad_bytes = 3 - ((total_mip_size + 3) % 4);
                expected_bytes_remaining += num_mip_pad_bytes;
            }
        }

        if (serializer.get_stream()->get_remaining() < expected_bytes_remaining)
        {
            has_valid_image_size_fields = false;
            disable_mip_and_cubemap_padding = true;

            console::warning("ktx_texture::read_from_stream: KTX file size is smaller than expected - trying to read anyway without imageSize fields\n");
        }
    }
#endif

    for (uint32_t mip_level = 0; mip_level < get_num_mips(); mip_level++)
    {
        uint32_t mip_width, mip_height, mip_depth;
        get_mip_dim(mip_level, mip_width, mip_height, mip_depth);

        const uint32_t mip_row_blocks = (mip_width + m_block_dim - 1) / m_block_dim;
        const uint32_t mip_col_blocks = (mip_height + m_block_dim - 1) / m_block_dim;
        if ((!mip_row_blocks) || (!mip_col_blocks))
            return false;

        uint32_t image_size = 0;
        if (!has_valid_image_size_fields)
        {
            // Synthesize the imageSize the file should have contained.
            if ((!m_header.m_numberOfArrayElements) && (get_num_faces() == 6))
            {
                // The KTX file format has an exception for plain cubemap textures, argh.
                image_size = mip_row_blocks * mip_col_blocks * m_bytes_per_block;
            }
            else
            {
                image_size = mip_depth * mip_row_blocks * mip_col_blocks * m_bytes_per_block * get_array_size() * get_num_faces();
            }
        }
        else
        {
            if (serializer.read(&image_size, 1, sizeof(image_size)) != sizeof(image_size))
                return false;

            if (m_opposite_endianness)
                image_size = utils::swap32(image_size);
        }

        if (!image_size)
            return false;

        uint32_t total_mip_size = 0;

        // The KTX file format has an exception for plain cubemap textures, argh.
        if ((!m_header.m_numberOfArrayElements) && (get_num_faces() == 6))
        {
            // plain non-array cubemap: image_size is per-FACE here.
            for (uint32_t face = 0; face < get_num_faces(); face++)
            {
                VOGL_ASSERT(m_image_data.size() == get_image_index(mip_level, 0, face, 0));
                m_image_data.push_back(uint8_vec());
                uint8_vec &image_data = m_image_data.back();

                image_data.resize(image_size);
                if (serializer.read(&image_data[0], 1, image_size) != image_size)
                    return false;

                if (m_opposite_endianness)
                    utils::endian_swap_mem(&image_data[0], image_size, m_header.m_glTypeSize);

                uint32_t num_cube_pad_bytes = disable_mip_and_cubemap_padding ? 0 : (3 - ((image_size + 3) % 4));
                if (serializer.read(pad_bytes, 1, num_cube_pad_bytes) != num_cube_pad_bytes)
                    return false;

                total_mip_size += image_size + num_cube_pad_bytes;
            }
        }
        else
        {
            uint32_t num_image_bytes_remaining = image_size;

            // 1D, 2D, 3D (normal or array texture), or array cubemap
            for (uint32_t array_element = 0; array_element < get_array_size(); array_element++)
            {
                for (uint32_t face = 0; face < get_num_faces(); face++)
                {
                    for (uint32_t zslice = 0; zslice < mip_depth; zslice++)
                    {
                        uint32_t slice_size = mip_row_blocks * mip_col_blocks * m_bytes_per_block;
                        if ((!slice_size) || (slice_size > num_image_bytes_remaining))
                            return false;

                        uint32_t image_index = get_image_index(mip_level, array_element, face, zslice);
                        m_image_data.ensure_element_is_valid(image_index);
                        uint8_vec &image_data = m_image_data[image_index];

                        image_data.resize(slice_size);
                        if (serializer.read(&image_data[0], 1, slice_size) != slice_size)
                            return false;

                        if (m_opposite_endianness)
                            utils::endian_swap_mem(&image_data[0], slice_size, m_header.m_glTypeSize);

                        num_image_bytes_remaining -= slice_size;

                        total_mip_size += slice_size;
                    }
                }
            }

            // The declared imageSize must be consumed exactly.
            if (num_image_bytes_remaining)
            {
                VOGL_ASSERT_ALWAYS;
                return false;
            }
        }

        uint32_t num_mip_pad_bytes = disable_mip_and_cubemap_padding ? 0 : (3 - ((total_mip_size + 3) % 4));
        if (serializer.read(pad_bytes, 1, num_mip_pad_bytes) != num_mip_pad_bytes)
            return false;
    }

    return true;
}
// Deep-ish equality: compares the public properties, the key/value
// metadata (order-insensitively), and every image's bytes. Deliberately
// avoids poking at internal state such as the raw header.
bool ktx_texture::operator==(const ktx_texture &rhs) const
{
    if (this == &rhs)
        return true;

// This is not super deep because I want to avoid poking around into internal state (such as the header)
#define CMP(x) \
    if (x != rhs.x) \
        return false;

    CMP(get_ogl_internal_fmt());
    CMP(get_width());
    CMP(get_height());
    CMP(get_depth());
    CMP(get_num_mips());
    CMP(get_array_size());
    CMP(get_num_faces());
    CMP(is_compressed());
    CMP(get_block_dim());

    // The image fmt/type shouldn't matter with compressed textures.
    if (!is_compressed())
    {
        CMP(get_ogl_fmt());
        CMP(get_ogl_type());
    }

    CMP(get_total_images());

    CMP(get_opposite_endianness());

    // Do an order insensitive key/value comparison.
    dynamic_string_array lhs_keys;
    get_keys(lhs_keys);

    dynamic_string_array rhs_keys;
    rhs.get_keys(rhs_keys);

    if (lhs_keys.size() != rhs_keys.size())
        return false;

    lhs_keys.sort(dynamic_string_less_than_case_sensitive());
    rhs_keys.sort(dynamic_string_less_than_case_sensitive());

    for (uint32_t i = 0; i < lhs_keys.size(); i++)
        if (lhs_keys[i].compare(rhs_keys[i], true) != 0)
            return false;

    for (uint32_t i = 0; i < lhs_keys.size(); i++)
    {
        uint8_vec lhs_data, rhs_data;
        if (!get_key_value_data(lhs_keys[i].get_ptr(), lhs_data))
            return false;
        // BUG FIX: the RHS value must come from rhs. The previous code read
        // this object's data twice, so the comparison below always passed
        // and differing metadata values were never detected.
        if (!rhs.get_key_value_data(lhs_keys[i].get_ptr(), rhs_data))
            return false;
        if (lhs_data != rhs_data)
            return false;
    }

    // Compare images.
    for (uint32_t l = 0; l < get_num_mips(); l++)
    {
        for (uint32_t a = 0; a < get_array_size(); a++)
        {
            for (uint32_t f = 0; f < get_num_faces(); f++)
            {
                for (uint32_t z = 0; z < get_depth(); z++)
                {
                    const uint8_vec &lhs_img = get_image_data(l, a, f, z);
                    const uint8_vec &rhs_img = rhs.get_image_data(l, a, f, z);

                    if (lhs_img != rhs_img)
                        return false;
                }
            }
        }
    }

#undef CMP

    return true;
}
/**
 * Dispatch a (non-virtual) method call: pop the callee's arguments off the
 * value stack, then either hand off to the native dispatcher (no frame
 * pushed) or push a new stack frame and point the VM registers at the
 * callee's code. Both the stack-frame array and the value stack are grown
 * on demand (unless FIXED_STACK_SIZE), throwing StackOverflowError when
 * growth is impossible.
 *
 * @param methodRecord Callee's method record.
 * @param retAddr What the PC should be upon return.
 * @return true iff the stack frame was pushed.
 */
boolean dispatch_special (MethodRecord *methodRecord, byte *retAddr)
{
#if DEBUG_METHODS
  int debug_ctr;
#endif
  StackFrame *stackFrame;
  byte newStackFrameIndex;

#if DEBUG_BYTECODE
  printf ("\n------ dispatch special - %d ------------------\n\n",
          methodRecord->signatureId);
#endif

#if DEBUG_METHODS
  printf ("dispatch_special: %d, %d\n", (int) methodRecord, (int) retAddr);
  printf ("-- signature id = %d\n", methodRecord->signatureId);
  printf ("-- code offset = %d\n", methodRecord->codeOffset);
  printf ("-- flags = %d\n", methodRecord->mflags);
  printf ("-- num params = %d\n", methodRecord->numParameters);
  printf ("-- stack ptr = %d\n", (int) get_stack_ptr());
  printf ("-- max stack ptr= %d\n", (int) (currentThread->stackArray + (get_array_size(currentThread->stackArray))*2));
#endif

  /* Arguments were pushed by the caller; consume them and arrange for
     execution to resume at retAddr after the callee returns. */
  pop_words (methodRecord->numParameters);
  pc = retAddr;

  if (is_native (methodRecord))
  {
#if DEBUG_METHODS
    printf ("-- native\n");
#endif
    dispatch_native (methodRecord->signatureId, get_stack_ptr() + 1);
    // Stack frame not pushed
    return false;
  }

  /* Grow the stack-frame array if this call would overflow it. */
  newStackFrameIndex = currentThread->stackFrameArraySize;
  if (newStackFrameIndex >= get_array_length((Object *) word2ptr (currentThread->stackFrameArray)))
  {
#if !FIXED_STACK_SIZE
    // int len = get_array_length((Object *) word2ptr (currentThread->stackFrameArray));
    int newlen = get_array_length((Object *) word2ptr (currentThread->stackFrameArray)) * 3 / 2;
    JINT newStackFrameArray = JNULL;
    // Stack frames are indexed by a byte value so limit the size.
    if (newlen <= 255)
    {
      // increase the stack frame size
      newStackFrameArray = ptr2word(reallocate_array(word2ptr(currentThread->stackFrameArray), newlen));
    }
    // If can't allocate new stack, give in!
    if (newStackFrameArray == JNULL)
    {
#endif
      throw_exception (stackOverflowError);
      return false;
#if !FIXED_STACK_SIZE
    }
    // Assign new array
    currentThread->stackFrameArray = newStackFrameArray;
#endif
  }

  if (newStackFrameIndex == 0)
  {
    // Assign NEW stack frame
    stackFrame = stackframe_array();
  }
  else
  {
#if DEBUG_METHODS
    for (debug_ctr = 0; debug_ctr < methodRecord->numParameters; debug_ctr++)
      printf ("-- param[%d] = %ld\n", debug_ctr, (long) get_stack_ptr()[debug_ctr+1]);
#endif
    // Save OLD stackFrame state
    stackFrame = stackframe_array() + (newStackFrameIndex - 1);
    update_stack_frame (stackFrame);
    // Push NEW stack frame
    stackFrame++;
  }

  // Increment size of stack frame array
  currentThread->stackFrameArraySize++;
  // Initialize rest of new stack frame
  stackFrame->methodRecord = methodRecord;
  stackFrame->monitor = null;
  /* Locals start just above the arguments that were popped above. */
  stackFrame->localsBase = get_stack_ptr() + 1;
  // Initialize auxiliary global variables (registers)
  pc = get_code_ptr(methodRecord);

#if DEBUG_METHODS
  printf ("pc set to 0x%X\n", (int) pc);
#endif

  init_sp (stackFrame, methodRecord);
  update_constant_registers (stackFrame);

  //printf ("m %d stack = %d\n", (int) methodRecord->signatureId, (int) (localsBase - stack_array()));

  // Check for stack overflow
  // (stackTop + methodRecord->maxOperands) >= (stack_array() + STACK_SIZE);
  if (is_stack_overflow (methodRecord))
  {
#if !FIXED_STACK_SIZE
    StackFrame *stackBase;
    int i;

    // Need at least this many bytes
    // int len = (int)(stackTop + methodRecord->maxOperands) - (int)(stack_array()) - HEADER_SIZE;

    // Need to compute new array size (as distinct from number of bytes in array).
    int newlen = (((int)(stackTop + methodRecord->maxOperands) - (int)(stack_array()) - HEADER_SIZE + 1) / 4) * 3 / 2;
    JINT newStackArray = ptr2word(reallocate_array(word2ptr(currentThread->stackArray), newlen));

    // If can't allocate new stack, give in!
    if (newStackArray == JNULL)
    {
#endif
      throw_exception (stackOverflowError);
      return false;
#if !FIXED_STACK_SIZE
    }

    // Adjust pointers.
    /* The value stack moved; rebase every cached pointer (stackTop,
       localsBase and each saved frame's pointers) by the displacement. */
    newlen = newStackArray - currentThread->stackArray;
    stackBase = stackframe_array();
    stackTop = word2ptr(ptr2word(stackTop) + newlen);
    localsBase = word2ptr(ptr2word(localsBase) + newlen);
#if DEBUG_MEMORY
    printf("thread=%d, stackTop(%d), localsBase(%d)=%d\n", currentThread->threadId, (int)stackTop, (int)localsBase, (int)(*localsBase));
#endif
    for (i=currentThread->stackFrameArraySize-1; i >= 0; i--)
    {
      stackBase[i].localsBase = word2ptr(ptr2word(stackBase[i].localsBase) + newlen);
      stackBase[i].stackTop = word2ptr(ptr2word(stackBase[i].stackTop) + newlen);
#if DEBUG_MEMORY
      printf("stackBase[%d].localsBase(%d) = %d\n", i, (int)stackBase[i].localsBase, (int)(*stackBase[i].localsBase));
#endif
    }
    // Assign new array
    currentThread->stackArray = newStackArray;
#endif
  }
  return true;
}