bool ktx_texture::consistency_check() const { if (!check_header()) return false; uint32_t block_dim = 0, bytes_per_block = 0; if ((!m_header.m_glType) || (!m_header.m_glFormat)) { if ((m_header.m_glType) || (m_header.m_glFormat)) return false; if (!ktx_get_ogl_fmt_desc(m_header.m_glInternalFormat, m_header.m_glType, block_dim, bytes_per_block)) return false; if (block_dim == 1) return false; //if ((get_width() % block_dim) || (get_height() % block_dim)) // return false; } else { if (!ktx_get_ogl_fmt_desc(m_header.m_glFormat, m_header.m_glType, block_dim, bytes_per_block)) return false; if (block_dim > 1) return false; } if ((m_block_dim != block_dim) || (m_bytes_per_block != bytes_per_block)) return false; uint32_t total_expected_images = get_total_images(); if (m_image_data.size() != total_expected_images) return false; for (uint32_t mip_level = 0; mip_level < get_num_mips(); mip_level++) { uint32_t mip_width, mip_height, mip_depth; get_mip_dim(mip_level, mip_width, mip_height, mip_depth); const uint32_t mip_row_blocks = (mip_width + m_block_dim - 1) / m_block_dim; const uint32_t mip_col_blocks = (mip_height + m_block_dim - 1) / m_block_dim; if ((!mip_row_blocks) || (!mip_col_blocks)) return false; for (uint32_t array_element = 0; array_element < get_array_size(); array_element++) { for (uint32_t face = 0; face < get_num_faces(); face++) { for (uint32_t zslice = 0; zslice < mip_depth; zslice++) { const uint8_vec &image_data = get_image_data(get_image_index(mip_level, array_element, face, zslice)); uint32_t expected_image_size = mip_row_blocks * mip_col_blocks * m_bytes_per_block; if (image_data.size() != expected_image_size) return false; } } } } return true; }
// Sanity-checks the basic geometry fields of the KTX header.
// Returns false for impossible face counts, a zero width, a 3D texture
// without a height, or a cubemap that is not a plain 2D texture.
// (A disabled mip-count-vs-dimensions check existed here historically.)
bool ktx_texture::check_header() const
{
    const uint32_t faces = get_num_faces();

    // Only plain textures (1 face) and cubemaps (6 faces) are legal.
    if ((faces != 1) && (faces != 6))
        return false;

    // Width is mandatory for every texture type.
    if (!m_header.m_pixelWidth)
        return false;

    // A texture with depth (3D) must also have a height.
    if (m_header.m_pixelDepth && (!m_header.m_pixelHeight))
        return false;

    // Cubemaps must be 2D: non-zero height, no depth.
    if (faces == 6)
    {
        if (m_header.m_pixelDepth)
            return false;
        if (!m_header.m_pixelHeight)
            return false;
    }

    return true;
}
uint32_t ktx_texture::get_total_images() const { if (!is_valid() || !get_num_mips()) return 0; // bogus: //return get_num_mips() * (get_depth() * get_num_faces() * get_array_size()); // Naive algorithm, could just compute based off the # of mips uint32_t max_index = 0; for (uint32_t mip_level = 0; mip_level < get_num_mips(); mip_level++) { uint32_t total_zslices = math::maximum<uint32_t>(get_depth() >> mip_level, 1U); uint32_t index = get_image_index(mip_level, get_array_size() - 1, get_num_faces() - 1, total_zslices - 1); max_index = math::maximum<uint32_t>(max_index, index); } return max_index + 1; }
/*
 * build_elem_elem() -- build the element -> element connectivity for an
 * EXODUS II mesh.
 *
 * For each face of each element, identify the unique neighboring element
 * sharing every node of that face (by intersecting the node->element lists
 * of the face's nodes), or mark the face as exterior (-1).  Results are
 * stored in compressed form in exo->elem_elem_pntr[] / exo->elem_elem_list[].
 * When the frontal solver is selected, a CSR/Fortran-style copy is also
 * built in exo->elem_elem_xadj[] / exo->elem_elem_adjncy[] (for METIS).
 *
 * Requires: exo->elem_node_conn_exists and exo->node_elem_conn_exists.
 */
void build_elem_elem(Exo_DB *exo)
{
  int ce;                       /* cursor into node->element list */
  int count;                    /* running fencepost for elem_elem_pntr */
  int e;
  int ebi;                      /* element block index */
  int elem;                     /* global element counter */
  int ename;                    /* candidate neighbor element name */
  int face;
  //int his_dim, her_dim;
  int i, j;
  int index;
  int len_curr;
  int len_prev;
  int len_intr;
  int length, length_new, num_faces;
  int iel;
  int ioffset;
  int n;
  int neighbor_name = -1;
  int node;
  int num_elem_sides;
  int num_nodes;
  int snl[MAX_NODES_PER_SIDE];  /* Side Node List - NOT Saturday Night Live! */

  char err_msg[MAX_CHAR_ERR_MSG];

  /*
   * Integer arrays used to find intersection sets of node->element lists.
   */
  int prev_set[MAX_EPN];        /* list of elements attached to previous node */
  int curr_set[MAX_EPN];        /* list of elements attached to "this" node */
  int interset[MAX_EPN];        /* values of hits between */
  int ip[MAX_EPN];              /* indeces of hits for prev_set[] */
  int ic[MAX_EPN];              /* indeces of hits for curr_set[] */

  /*
   * If the element->node and node->element connectivities have not been
   * built, then we won't be able to do this task.
   */
  if ( ! exo->elem_node_conn_exists || ! exo->node_elem_conn_exists )
    {
      EH(-1, "Build elem->node before node->elem.");
      return;
    }

  /*
   * The number of elements connected via conventional faces may be deduced
   * from the number of elements and their type.
   */
  exo->elem_elem_pntr = (int *) smalloc((exo->num_elems+1)*sizeof(int));

  length = 0;
  for ( i=0; i<exo->num_elem_blocks; i++)
    {
      length += exo->eb_num_elems[i] * get_num_faces(exo->eb_elem_type[i]);
    }

  exo->elem_elem_list = (int *) smalloc(length*sizeof(int));

  /*
   * Initialize...
   */
  for ( i=0; i<length; i++)
    {
      exo->elem_elem_list[i] = UNASSIGNED_YET;
    }

  /* (dead code retained for reference: an earlier pass that only filled the
   * fencepost array)
  elem = 0;
  for ( ebi=0; ebi<exo->num_elem_blocks; ebi++)
    {
      num_elem_sides = get_num_faces(exo->eb_elem_type[ebi]);
      for ( e=0; e<exo->eb_num_elems[ebi]; e++)
        {
          exo->elem_elem_pntr[elem] = count;
          elem++;
          count += num_elem_sides;
        }
    }
  */

  /*
   * Walk through the elements, block by block.
   */
  count = 0;
  elem  = 0;
  for ( ebi=0; ebi<exo->num_elem_blocks; ebi++)
    {
      num_elem_sides = get_num_faces(exo->eb_elem_type[ebi]);

      for ( e=0; e<exo->eb_num_elems[ebi]; e++,elem++)
        {
          exo->elem_elem_pntr[elem] = count;
          count += num_elem_sides;

          /*
           * Look at each side of the element, collecting a unique
           * list of integers corresponding to the minimum number of nodes
           * needed to identify an entire side.
           *
           * Typically, the same number of nodes as space dimensions are
           * needed, with exceptions being the various "sides" of shells,
           * beams and trusses...
           */
          for ( face=0; face<num_elem_sides; face++)
            {
              /*
               * Given the element and the face construct the
               * list of node numbers that determine that side.
               *
               * Later, we might not need *all* the nodes on a side,
               * particularly for high order elements. It may suffice
               * to check only as many nodes as space dimensions that
               * the element lives in...
               */
              num_nodes = build_side_node_list(elem, face, ebi, exo, snl);
#ifdef DEBUG
              fprintf(stderr, "Elem %d, face %d has %d nodes: ", elem, face, num_nodes);
              for ( i=0; i<num_nodes; i++)
                {
                  fprintf(stderr, " %d", snl[i]);
                }
              fprintf(stderr, "\n");
#endif /* DEBUG */

              /*
               * Cross check: for each node in the side there is a list
               * of elements connected to it. Beginning with all the
               * elements connected to the first node (except for this given
               * element), cross check with all the elements connected with
               * the 2nd node to build an intersection set of one element.
               */
              for ( i=0; i<MAX_EPN; i++)
                {
                  prev_set[i] = -1;
                  curr_set[i] = -1;
                  interset[i] = -1;
                }
              len_prev = 0;
              len_curr = 0;
              len_intr = 0;

              for ( n=0; n<num_nodes; n++)
                {
                  /*
                   * Copy this node's element list into a clean "curr_set" array
                   * that will be intersected with any previously gathered
                   * lists of elements that qualify as promiscuously in
                   * contact with nodes...
                   */
                  node = snl[n];

                  for ( i=0; i<MAX_EPN; i++)
                    {
                      curr_set[i] = -1;
                    }
                  len_curr = 0;
#ifdef DEBUG
                  fprintf(stderr, "Traversing n->e connectivity of node %d\n", node);
#endif /* DEBUG */
                  for ( ce=exo->node_elem_pntr[node]; ce<exo->node_elem_pntr[node+1]; ce++)
                    {
                      ename = exo->node_elem_list[ce];
#ifdef DEBUG
                      fprintf(stderr, "\telem %d\n", ename);
#endif /* DEBUG */
                      /*
                       * Go ahead and accumulate the self element name
                       * just as a consistency check....
                       */
                      /*
                      if ( ename != e )
                        {
                        }
                      */

                      /*
                       * PKN: The current Goma use of ->elem_elem...
                       * is such that this connectivity should list
                       * connections like QUAD-BAR or HEX-SHELL.
                       * So, I'll add this dimension matching conditional
                       */
                      /* PRS (Summer 2012): Need to change this for shell stacks
                       * which have the same dim */

                      /* We need however to consider a special case (as of 8/30/2012
                       * this is a SHELL-on-SHELL stack. Viz. two materials, each a shell material
                       * which share not a side but a face. Since faces of shells are sides
                       * in patran speak, we need some special logic. We need to avoid adding
                       * the friend shell element (neighboring material) to the current shell element
                       * even though each material has the same number of sides.
                       * Here goes (BTW, I cannot find max-nodes-per-element anywhere!!!!)
                       */
                      int shell_on_shell = 0;
                      int flippy_flop = 0;
                      int nbr_ebid;
                      int nbr_num_elem_sides;

                      /* Which element block does the candidate neighbor live in? */
                      nbr_ebid = fence_post(ename, exo->eb_ptr, exo->num_elem_blocks+1);
                      EH(nbr_ebid, "Bad element block ID!");
                      nbr_num_elem_sides = get_num_faces(exo->eb_elem_type[nbr_ebid]);

                      shell_on_shell = 0;
                      flippy_flop = 0;

                      /* NOTE(review): the eb_id < 100 / >= 100 split looks like a
                       * project numbering convention separating shell stacks --
                       * confirm against the mesh-generation conventions. */
                      if (exo->eb_id[ebi] < 100 && exo->eb_id[nbr_ebid] >= 100) flippy_flop=1;
                      if (exo->eb_id[ebi] >= 100 && exo->eb_id[nbr_ebid] < 100) flippy_flop=1;

                      if ((nbr_ebid != ebi) &&
                          (strstr(exo->eb_elem_type[nbr_ebid], "SHELL")) &&
                          (strstr(exo->eb_elem_type[ebi], "SHELL")) &&
                          flippy_flop) shell_on_shell = 1;

                      // his_dim = elem_info(NDIM, exo->eb_elem_itype[ebi]);
                      // her_dim = elem_info(NDIM, exo->eb_elem_itype[exo->elem_eb[ename]]);
                      // if( his_dim == her_dim )

                      /* Only elements with the same face count (and not the
                       * shell-on-shell friend) qualify as face neighbors. */
                      if (nbr_num_elem_sides == num_elem_sides && !shell_on_shell)
                        {
                          curr_set[len_curr] = ename;
                          len_curr++;
                        }
                    }

                  /*
                   * The first node is special - we'll just compare
                   * it with itself by making the "previous set" just the
                   * same as the current set...
                   */
                  if ( n == 0 )
                    {
                      for ( i=0; i<MAX_EPN; i++)
                        {
                          prev_set[i] = curr_set[i];
                        }
                      len_prev = len_curr;
                    }
#ifdef DEBUG
                  fprintf(stderr, "\ncurr_set: ");
                  for ( i=0; i<len_curr; i++)
                    {
                      fprintf(stderr, "%d ", curr_set[i]);
                    }
                  fprintf(stderr, "\nprev_set: ");
                  for ( i=0; i<len_prev; i++)
                    {
                      fprintf(stderr, "%d ", prev_set[i]);
                    }
#endif /* DEBUG */

                  /*
                   * First, clean the intersection list and the list of
                   * hit indeces in the previous and current lists.
                   *
                   * Then find the intersection of the previous and current
                   * sets of elements attached to the previous and current
                   * nodes...
                   */
                  for ( i=0; i<MAX_EPN; i++)
                    {
                      interset[i] = -1;
                      ip[i] = -1;
                      ic[i] = -1;
                    }
                  len_intr = 0;

                  len_intr = int_intersect(prev_set, curr_set, len_prev, len_curr, ip, ic);
#ifdef DEBUG
                  fprintf(stderr, "num_hits = %d\n", len_intr);
#endif /* DEBUG */

                  /*
                   * Now, let's make the intersection set the next previous
                   * set of elements, a standard for comparison. We should
                   * eventually boil down to either one or zero elements
                   * that qualify...
                   */
                  for ( i=0; i<MAX_EPN; i++)
                    {
                      prev_set[i] = -1;
                    }
                  for ( i=0; i<len_intr; i++)
                    {
                      prev_set[i] = curr_set[ic[i]];
                    }
                  len_prev = len_intr;
                }

#ifdef DEBUG
              fprintf(stderr, "Element [%d], face [%d], local_node [%d]\n", elem, face, n);
              fprintf(stderr, "Intersection set length = %d\n", len_intr);
#endif /* DEBUG */

              /*
               * Now consider the different cases.
               */
              if ( len_intr == 2 )
                {
                  /*
                   * The boiled list contains self and one other element.
                   */
                  if ( prev_set[0] == elem )
                    {
                      neighbor_name = prev_set[1];
                    }
                  else
                    {
                      neighbor_name = prev_set[0];
                      if ( prev_set[1] != elem )
                        {
                          sr = sprintf(err_msg, "2 elems ( %d %d ) 1 should be %d!",
                                       prev_set[0], prev_set[1], elem);
                          EH(-1, err_msg);
                        }
                    }
                }
              else if ( len_intr == 1 && prev_set[0] == elem )
                {
                  /*
                   * The boiled list has one member, this element.
                   *
                   * The face must connect either to outer space or to
                   * another processor.
                   */
                  if ( Num_Proc == 1 )
                    {
                      neighbor_name = -1;
                    }
                  else
                    {
                      neighbor_name = -1;

                      /*
                       * I am going to punt for now. Later, revisit this
                       * condition and insert code to check for neighbor
                       * processors containing all the same face nodes.
                       *
                       * EH(-1, "Not done yet...");
                       */

                      /*
                       * Check if ALL the nodes on this face belong
                       * to another processors list of nodes. I.e., the
                       * node must all be in the external node list of
                       * and belong to the same external processor.
                       */
                    }
                }
              /*
               * Pathological cases that normally should not occur....
               */
              else if ( len_intr == 0 )
                {
                  sr = sprintf(err_msg, "Elem %d, face %d should self contain!",
                               elem, face);
                  EH(-1, err_msg);
                }
              else if ( len_intr == 1 && prev_set[0] != elem )
                {
                  sr = sprintf(err_msg, "Elem %d, face %d only connects with elem %d ?",
                               elem, face, prev_set[0]);
                  EH(-1, err_msg);
                }
              else
                {
                  sr = sprintf(err_msg, "Unknown elem-elem connection elem %d, face %d, len_intr=%d",
                               elem, face, len_intr);
                  WH(-1, err_msg);
                }

              /*
               * Now we know how to assign the neighbor name for this face
               * of the element.
               */
              index = exo->elem_elem_pntr[elem] + face;
              exo->elem_elem_list[index] = neighbor_name;
            } /* end face loop this elem */
        } /* end elem loop this elemblock */
    } /* end elem block loop */

  exo->elem_elem_pntr[exo->num_elems] = count; /* last fencepost */

  exo->elem_elem_conn_exists = TRUE;

  if (Linear_Solver == FRONT)
    {
      /*
       * Now that we have elem_elem_pntr and elem_elem_list for our parallel
       * world, we are going to use them also for optimal element bandwidth
       * reduction ordering. We will use METIS, but METIS requires the CSR
       * format, which is compressed, viz. we need to remove the -1s. Here
       * we go
       */

      /* First check for the assumption that all blocks have same number of
       * element faces. Stop if they don't and issue an error to the next
       * aspiring developer */
      for ( i=0; i<exo->num_elem_blocks; i++)
        {
          if(get_num_faces(exo->eb_elem_type[0]) != get_num_faces(exo->eb_elem_type[i]) )
            {
              EH(-1,"Stop! We cannot reorder these elements with METIS with elemement type changes");
            }
        }

      /* Now begin */
      exo->elem_elem_xadj = (int *) smalloc((exo->num_elems+1)*sizeof(int));

      /* initialize from the fencepost array */
      for(e=0; e<exo->num_elems+1 ; e++)
        {
          exo->elem_elem_xadj[e] = exo->elem_elem_pntr[e];
        }

      /* Recompute length of adjacency list by removing external edges */
      length_new = 0;
      for (i = 0; i < length; i++)
        {
          if(exo->elem_elem_list[i] != -1) length_new++;
        }

      exo->elem_elem_adjncy = alloc_int_1(length_new, -1);

      /* Now convert: copy interior edges, shifting fenceposts down for
       * every dropped exterior (-1) entry */
      ioffset=0;
      for(iel = 0; iel < exo->num_elems; iel++)
        {
          /* Big assumption here that all blocks have the same */
          /* element type.    Can be furbished later since this is */
          /* just for the frontal solver */
          num_faces = get_num_faces(exo->eb_elem_type[0]);
          for (i= iel*num_faces; i < (iel+1)*num_faces; i++)
            {
              j = i - ioffset;
              if(exo->elem_elem_list[i] == -1)
                {
                  ioffset++;
                  for(e=iel +1; e <exo->num_elems+1; e++)exo->elem_elem_xadj[e]--;
                }
              else
                {
                  exo->elem_elem_adjncy[j] = exo->elem_elem_list[i];
                }
            }
        }

      /* convert to Fortran style (1-based, as METIS' Fortran interface expects) */
      for(e=0; e<exo->num_elems+1 ; e++) exo->elem_elem_xadj[e]++;
      for ( i=0; i<length_new; i++) exo->elem_elem_adjncy[i]++;

    } /* End FRONTAL_SOLVER if */

  /*
   * Verification that every element/face has assigned something besides
   * the initial default value of "unassigned".
   *
   * For your convenience - FORTRAN 1-based numbering.
   */
#ifdef DEBUG
  for ( e=0; e<exo->num_elems; e++)
    {
      fprintf(stdout, "Elem %3d:", e+1);
      for ( ce=exo->elem_elem_pntr[e]; ce<exo->elem_elem_pntr[e+1]; ce++)
        {
          if ( exo->elem_elem_list[ce] == -1 )
            {
              fprintf(stdout, " spc");
            }
          else if ( exo->elem_elem_list[ce] < -1 )
            {
              fprintf(stdout, " prc");
            }
          else
            {
              fprintf(stdout, " %3d", exo->elem_elem_list[ce] + 1);
            }
          if ( exo->elem_elem_list[ce] == UNASSIGNED_YET )
            {
              sr = sprintf(err_msg, "You need to plug a leak at elem (%d) face (%d)",
                           exo->elem_elem_list[ce] + 1,
                           ce - exo->elem_elem_pntr[e] + 1);
              EH(-1, err_msg);
            }
        }
      fprintf(stdout, "\n");
    }
#endif /* DEBUG */

#if FALSE
  demo_elem_elem_conn(exo);
#endif

  return;
}
/*
 * do_physics_align_object() -- gradually roll (bank) an object so its up
 * vector aligns with the "up" of the segment it occupies.
 *
 * Picks the segment side whose normal best matches the object's current up
 * vector (or side 3, the floor, when floor_levelling is set), then applies
 * a frame-rate-scaled banking rotation toward that normal.  All math is
 * fixed-point (fix / fixang).
 */
void do_physics_align_object( object * obj )
{
    vms_vector desired_upvec;
    fixang delta_ang,roll_ang;
    //vms_vector forvec = {0,0,f1_0};
    vms_matrix temp_matrix;
    fix d,largest_d=-f1_0;
    int i,best_side;

    best_side=0;
    // bank player according to segment orientation

    //find side of segment that player is most alligned with
    for (i=0;i<6;i++) {
#ifdef COMPACT_SEGS
        vms_vector _tv1;
        get_side_normal( &Segments[obj->segnum], i, 0, &_tv1 );
        d = vm_vec_dot(&_tv1,&obj->orient.uvec);
#else
        d = vm_vec_dot(&Segments[obj->segnum].sides[i].normals[0],&obj->orient.uvec);
#endif
        if (d > largest_d) {largest_d = d; best_side=i;}
    }

    if (floor_levelling) {
        // old way: used floor's normal as upvec
        // (side 3 is assumed to be the floor here -- TODO confirm against
        // the segment side numbering convention)
#ifdef COMPACT_SEGS
        get_side_normal(&Segments[obj->segnum], 3, 0, &desired_upvec );
#else
        desired_upvec = Segments[obj->segnum].sides[3].normals[0];
#endif
    }
    else
        // new player leveling code: use normal of side closest to our up vec
        if (get_num_faces(&Segments[obj->segnum].sides[best_side])==2) {
            // Two-faced (non-planar) side: average its two face normals.
#ifdef COMPACT_SEGS
            vms_vector normals[2];
            get_side_normals(&Segments[obj->segnum], best_side, &normals[0], &normals[1] );
            desired_upvec.x = (normals[0].x + normals[1].x) / 2;
            desired_upvec.y = (normals[0].y + normals[1].y) / 2;
            desired_upvec.z = (normals[0].z + normals[1].z) / 2;
            vm_vec_normalize(&desired_upvec);
#else
            side *s = &Segments[obj->segnum].sides[best_side];
            desired_upvec.x = (s->normals[0].x + s->normals[1].x) / 2;
            desired_upvec.y = (s->normals[0].y + s->normals[1].y) / 2;
            desired_upvec.z = (s->normals[0].z + s->normals[1].z) / 2;
            vm_vec_normalize(&desired_upvec);
#endif
        }
        else
            // Planar side: use its single face normal directly.
#ifdef COMPACT_SEGS
            get_side_normal(&Segments[obj->segnum], best_side, 0, &desired_upvec );
#else
            desired_upvec = Segments[obj->segnum].sides[best_side].normals[0];
#endif

    // Only roll when the desired up isn't too close to our forward vector
    // (|dot| < 0.5 in fixed point), otherwise the banking math degenerates.
    if (labs(vm_vec_dot(&desired_upvec,&obj->orient.fvec)) < f1_0/2) {
        vms_angvec tangles;

        // Build the orientation we'd have if fully aligned, then measure
        // the roll angle between our current up and that target up.
        vm_vector_2_matrix(&temp_matrix,&obj->orient.fvec,&desired_upvec,NULL);

        delta_ang = vm_vec_delta_ang(&obj->orient.uvec,&temp_matrix.uvec,&obj->orient.fvec);

        delta_ang += obj->mtype.phys_info.turnroll;

        if (abs(delta_ang) > DAMP_ANG) {
            vms_matrix rotmat, new_pm;

            // Limit this frame's roll to ROLL_RATE scaled by frame time,
            // clamped so we never overshoot the remaining delta.
            roll_ang = fixmul(FrameTime,ROLL_RATE);

            if (abs(delta_ang) < roll_ang) roll_ang = delta_ang;
            else if (delta_ang<0) roll_ang = -roll_ang;

            // Apply a pure bank rotation to the object's orientation.
            tangles.p = tangles.h = 0;
            tangles.b = roll_ang;
            vm_angles_2_matrix(&rotmat,&tangles);
            vm_matrix_x_matrix(&new_pm,&obj->orient,&rotmat);
            obj->orient = new_pm;
        }
        else
            // Close enough: stop floor levelling mode.
            floor_levelling=0;
    }
}
// Serializes the texture to a KTX file: header, optional key/value
// metadata, then per-mip imageSize fields and image payloads (with the
// KTX 4-byte padding rules). When no_keyvalue_data is true the key/value
// block is omitted. Returns false on any inconsistency or write failure.
// NOTE(review): this const method mutates m_header (identifier,
// endianness, glTypeSize, base internal format, byte counts) -- the
// header member is presumably declared mutable; confirm in the class.
bool ktx_texture::write_to_stream(data_stream_serializer &serializer, bool no_keyvalue_data) const
{
    if (!consistency_check())
    {
        VOGL_ASSERT_ALWAYS;
        return false;
    }

    // Stamp the 12-byte KTX file identifier and the endianness marker.
    memcpy(m_header.m_identifier, s_ktx_file_id, sizeof(m_header.m_identifier));
    m_header.m_endianness = m_opposite_endianness ? KTX_OPPOSITE_ENDIAN : KTX_ENDIAN;

    if (m_block_dim == 1)
    {
        // Uncompressed: derive type size and base format from glType/glFormat.
        m_header.m_glTypeSize = ktx_get_ogl_type_size(m_header.m_glType);
        m_header.m_glBaseInternalFormat = m_header.m_glFormat;
    }
    else
    {
        // Compressed: base internal format comes from the internal format.
        m_header.m_glBaseInternalFormat = ktx_get_ogl_compressed_base_internal_fmt(m_header.m_glInternalFormat);
    }

    // Each key/value entry is a 4-byte size plus the data rounded up to 4 bytes.
    m_header.m_bytesOfKeyValueData = 0;
    if (!no_keyvalue_data)
    {
        for (uint32_t i = 0; i < m_key_values.size(); i++)
            m_header.m_bytesOfKeyValueData += sizeof(uint32_t) + ((m_key_values[i].size() + 3) & ~3);
    }

    // Swap the header only around the write, then swap back so the
    // in-memory copy stays in native order.
    if (m_opposite_endianness)
        m_header.endian_swap();

    bool success = (serializer.write(&m_header, sizeof(m_header), 1) == 1);

    if (m_opposite_endianness)
        m_header.endian_swap();

    if (!success)
        return success;

    uint32_t total_key_value_bytes = 0;
    const uint8_t padding[3] = { 0, 0, 0 };

    if (!no_keyvalue_data)
    {
        for (uint32_t i = 0; i < m_key_values.size(); i++)
        {
            uint32_t key_value_size = m_key_values[i].size();

            // Same swap/write/swap-back dance for the per-entry size field.
            if (m_opposite_endianness)
                key_value_size = utils::swap32(key_value_size);

            success = (serializer.write(&key_value_size, sizeof(key_value_size), 1) == 1);
            total_key_value_bytes += sizeof(key_value_size);

            if (m_opposite_endianness)
                key_value_size = utils::swap32(key_value_size);

            if (!success)
                return false;

            if (key_value_size)
            {
                if (serializer.write(&m_key_values[i][0], key_value_size, 1) != 1)
                    return false;
                total_key_value_bytes += key_value_size;

                // Pad each entry to a 4-byte boundary.
                uint32_t num_padding = 3 - ((key_value_size + 3) % 4);
                if ((num_padding) && (serializer.write(padding, num_padding, 1) != 1))
                    return false;
                total_key_value_bytes += num_padding;
            }
        }
        (void)total_key_value_bytes;
    }
    VOGL_ASSERT(total_key_value_bytes == m_header.m_bytesOfKeyValueData);

    for (uint32_t mip_level = 0; mip_level < get_num_mips(); mip_level++)
    {
        uint32_t mip_width, mip_height, mip_depth;
        get_mip_dim(mip_level, mip_width, mip_height, mip_depth);

        const uint32_t mip_row_blocks = (mip_width + m_block_dim - 1) / m_block_dim;
        const uint32_t mip_col_blocks = (mip_height + m_block_dim - 1) / m_block_dim;
        if ((!mip_row_blocks) || (!mip_col_blocks))
            return false;

        // imageSize: per the KTX spec, for a plain (non-array) cubemap this
        // is the size of ONE face; otherwise it covers the whole mip level.
        uint32_t image_size = mip_row_blocks * mip_col_blocks * m_bytes_per_block;
        if ((m_header.m_numberOfArrayElements) || (get_num_faces() == 1))
            image_size *= (get_array_size() * get_num_faces() * mip_depth);

        if (!image_size)
        {
            VOGL_ASSERT_ALWAYS;
            return false;
        }

        if (m_opposite_endianness)
            image_size = utils::swap32(image_size);

        success = (serializer.write(&image_size, sizeof(image_size), 1) == 1);

        if (m_opposite_endianness)
            image_size = utils::swap32(image_size);

        if (!success)
            return false;

        uint32_t total_mip_size = 0;
        uint32_t total_image_data_size = 0;

        if ((!m_header.m_numberOfArrayElements) && (get_num_faces() == 6))
        {
            // plain non-array cubemap: each face is written and padded separately
            for (uint32_t face = 0; face < get_num_faces(); face++)
            {
                const uint8_vec &image_data = get_image_data(get_image_index(mip_level, 0, face, 0));
                if ((!image_data.size()) || (image_data.size() != image_size))
                    return false;

                if (m_opposite_endianness)
                {
                    // Swap a temporary copy so the stored image stays native.
                    uint8_vec tmp_image_data(image_data);
                    utils::endian_swap_mem(&tmp_image_data[0], tmp_image_data.size(), m_header.m_glTypeSize);
                    if (serializer.write(&tmp_image_data[0], tmp_image_data.size(), 1) != 1)
                        return false;
                }
                else if (serializer.write(&image_data[0], image_data.size(), 1) != 1)
                    return false;

                // Not +=, but =, because of the silly image_size plain cubemap exception in the KTX file format
                total_image_data_size = image_data.size();

                uint32_t num_cube_pad_bytes = 3 - ((image_data.size() + 3) % 4);
                if ((num_cube_pad_bytes) && (serializer.write(padding, num_cube_pad_bytes, 1) != 1))
                    return false;

                total_mip_size += image_size + num_cube_pad_bytes;
            }
        }
        else
        {
            // 1D, 2D, 3D (normal or array texture), or array cubemap:
            // all slices of the mip are written back to back, padded once.
            for (uint32_t array_element = 0; array_element < get_array_size(); array_element++)
            {
                for (uint32_t face = 0; face < get_num_faces(); face++)
                {
                    for (uint32_t zslice = 0; zslice < mip_depth; zslice++)
                    {
                        const uint8_vec &image_data = get_image_data(get_image_index(mip_level, array_element, face, zslice));
                        if (!image_data.size())
                            return false;

                        if (m_opposite_endianness)
                        {
                            uint8_vec tmp_image_data(image_data);
                            utils::endian_swap_mem(&tmp_image_data[0], tmp_image_data.size(), m_header.m_glTypeSize);
                            if (serializer.write(&tmp_image_data[0], tmp_image_data.size(), 1) != 1)
                                return false;
                        }
                        else if (serializer.write(&image_data[0], image_data.size(), 1) != 1)
                            return false;

                        total_image_data_size += image_data.size();
                        total_mip_size += image_data.size();
                    }
                }
            }

            // Pad the whole mip level to a 4-byte boundary.
            uint32_t num_mip_pad_bytes = 3 - ((total_mip_size + 3) % 4);
            if ((num_mip_pad_bytes) && (serializer.write(padding, num_mip_pad_bytes, 1) != 1))
                return false;
            total_mip_size += num_mip_pad_bytes;
        }

        VOGL_ASSERT((total_mip_size & 3) == 0);
        VOGL_ASSERT(total_image_data_size == image_size);
    }

    return true;
}
// Parses a KTX file from the stream into this texture: header validation
// (including endianness), key/value metadata, then every mip level's
// image data. Contains workarounds (VOGL_KTX_PVRTEX_WORKAROUNDS) for
// PVRTexTool's malformed ETC1 output and for files missing imageSize
// fields. Returns false on any malformed or truncated input.
bool ktx_texture::read_from_stream(data_stream_serializer &serializer)
{
    clear();

    // Read header
    if (serializer.read(&m_header, 1, sizeof(m_header)) != sizeof(ktx_header))
        return false;

    // Check header: must start with the fixed 12-byte KTX identifier.
    if (memcmp(s_ktx_file_id, m_header.m_identifier, sizeof(m_header.m_identifier)))
        return false;

    if ((m_header.m_endianness != KTX_OPPOSITE_ENDIAN) && (m_header.m_endianness != KTX_ENDIAN))
        return false;

    m_opposite_endianness = (m_header.m_endianness == KTX_OPPOSITE_ENDIAN);
    if (m_opposite_endianness)
    {
        m_header.endian_swap();

        // After swapping, glTypeSize must be a size we know how to swap.
        if ((m_header.m_glTypeSize != sizeof(uint8_t)) && (m_header.m_glTypeSize != sizeof(uint16_t)) && (m_header.m_glTypeSize != sizeof(uint32_t)))
            return false;
    }

    if (!check_header())
        return false;

    if (!compute_pixel_info())
    {
#if VOGL_KTX_PVRTEX_WORKAROUNDS
        // rg [9/10/13] - moved this check into here, instead of in compute_pixel_info(), but need to retest it.
        if ((!m_header.m_glInternalFormat) && (!m_header.m_glType) && (!m_header.m_glTypeSize) && (!m_header.m_glBaseInternalFormat))
        {
            // PVRTexTool writes bogus headers when outputting ETC1.
            console::warning("ktx_texture::compute_pixel_info: Header doesn't specify any format, assuming ETC1 and hoping for the best\n");
            m_header.m_glBaseInternalFormat = KTX_RGB;
            m_header.m_glInternalFormat = KTX_ETC1_RGB8_OES;
            m_header.m_glTypeSize = 1;
            m_block_dim = 4;     // ETC1: 4x4 blocks
            m_bytes_per_block = 8; // ETC1: 8 bytes per block
        }
        else
#endif
            return false;
    }

    uint8_t pad_bytes[3];

    // Read the key value entries. Each is: uint32 byte count, data,
    // padding to a 4-byte boundary. The running remaining-bytes counter
    // guards against sizes that overrun m_bytesOfKeyValueData.
    uint32_t num_key_value_bytes_remaining = m_header.m_bytesOfKeyValueData;
    while (num_key_value_bytes_remaining)
    {
        if (num_key_value_bytes_remaining < sizeof(uint32_t))
            return false;

        uint32_t key_value_byte_size;
        if (serializer.read(&key_value_byte_size, 1, sizeof(uint32_t)) != sizeof(uint32_t))
            return false;

        num_key_value_bytes_remaining -= sizeof(uint32_t);

        if (m_opposite_endianness)
            key_value_byte_size = utils::swap32(key_value_byte_size);

        if (key_value_byte_size > num_key_value_bytes_remaining)
            return false;

        uint8_vec key_value_data;
        if (key_value_byte_size)
        {
            key_value_data.resize(key_value_byte_size);
            if (serializer.read(&key_value_data[0], 1, key_value_byte_size) != key_value_byte_size)
                return false;
        }

        m_key_values.push_back(key_value_data);

        uint32_t padding = 3 - ((key_value_byte_size + 3) % 4);
        if (padding)
        {
            if (serializer.read(pad_bytes, 1, padding) != padding)
                return false;
        }

        num_key_value_bytes_remaining -= key_value_byte_size;
        if (num_key_value_bytes_remaining < padding)
            return false;
        num_key_value_bytes_remaining -= padding;
    }

    // Now read the mip levels. Cap the total image count as a sanity limit.
    uint32_t total_faces = get_num_mips() * get_array_size() * get_num_faces() * get_depth();
    if ((!total_faces) || (total_faces > 65535))
        return false;

// See Section 2.8 of KTX file format: No rounding to block sizes should be applied for block compressed textures.
// OK, I'm going to break that rule otherwise KTX can only store a subset of textures that DDS can handle for no good reason.
#if 0
    const uint32_t mip0_row_blocks = m_header.m_pixelWidth / m_block_dim;
    const uint32_t mip0_col_blocks = VOGL_MAX(1, m_header.m_pixelHeight) / m_block_dim;
#else
    const uint32_t mip0_row_blocks = (m_header.m_pixelWidth + m_block_dim - 1) / m_block_dim;
    const uint32_t mip0_col_blocks = (VOGL_MAX(1, m_header.m_pixelHeight) + m_block_dim - 1) / m_block_dim;
#endif
    if ((!mip0_row_blocks) || (!mip0_col_blocks))
        return false;

    const uint32_t mip0_depth = VOGL_MAX(1, m_header.m_pixelDepth);
    VOGL_NOTE_UNUSED(mip0_depth);

    bool has_valid_image_size_fields = true;
    bool disable_mip_and_cubemap_padding = false;

#if VOGL_KTX_PVRTEX_WORKAROUNDS
    {
        // PVRTexTool has a bogus KTX writer that doesn't write any imageSize fields. Nice.
        // Predict how many bytes a well-formed file would still contain; if the
        // stream is shorter, assume the imageSize fields (and padding) are absent.
        size_t expected_bytes_remaining = 0;
        for (uint32_t mip_level = 0; mip_level < get_num_mips(); mip_level++)
        {
            uint32_t mip_width, mip_height, mip_depth;
            get_mip_dim(mip_level, mip_width, mip_height, mip_depth);

            const uint32_t mip_row_blocks = (mip_width + m_block_dim - 1) / m_block_dim;
            const uint32_t mip_col_blocks = (mip_height + m_block_dim - 1) / m_block_dim;
            if ((!mip_row_blocks) || (!mip_col_blocks))
                return false;

            expected_bytes_remaining += sizeof(uint32_t); // the imageSize field itself

            if ((!m_header.m_numberOfArrayElements) && (get_num_faces() == 6))
            {
                // Plain cubemap: each face padded individually.
                for (uint32_t face = 0; face < get_num_faces(); face++)
                {
                    uint32_t slice_size = mip_row_blocks * mip_col_blocks * m_bytes_per_block;
                    expected_bytes_remaining += slice_size;

                    uint32_t num_cube_pad_bytes = 3 - ((slice_size + 3) % 4);
                    expected_bytes_remaining += num_cube_pad_bytes;
                }
            }
            else
            {
                // All slices back to back, one pad per mip level.
                uint32_t total_mip_size = 0;
                for (uint32_t array_element = 0; array_element < get_array_size(); array_element++)
                {
                    for (uint32_t face = 0; face < get_num_faces(); face++)
                    {
                        for (uint32_t zslice = 0; zslice < mip_depth; zslice++)
                        {
                            uint32_t slice_size = mip_row_blocks * mip_col_blocks * m_bytes_per_block;
                            total_mip_size += slice_size;
                        }
                    }
                }
                expected_bytes_remaining += total_mip_size;

                uint32_t num_mip_pad_bytes = 3 - ((total_mip_size + 3) % 4);
                expected_bytes_remaining += num_mip_pad_bytes;
            }
        }

        if (serializer.get_stream()->get_remaining() < expected_bytes_remaining)
        {
            has_valid_image_size_fields = false;
            disable_mip_and_cubemap_padding = true;
            console::warning("ktx_texture::read_from_stream: KTX file size is smaller than expected - trying to read anyway without imageSize fields\n");
        }
    }
#endif

    for (uint32_t mip_level = 0; mip_level < get_num_mips(); mip_level++)
    {
        uint32_t mip_width, mip_height, mip_depth;
        get_mip_dim(mip_level, mip_width, mip_height, mip_depth);

        const uint32_t mip_row_blocks = (mip_width + m_block_dim - 1) / m_block_dim;
        const uint32_t mip_col_blocks = (mip_height + m_block_dim - 1) / m_block_dim;
        if ((!mip_row_blocks) || (!mip_col_blocks))
            return false;

        uint32_t image_size = 0;
        if (!has_valid_image_size_fields)
        {
            // No imageSize field in the stream: reconstruct it.
            if ((!m_header.m_numberOfArrayElements) && (get_num_faces() == 6))
            {
                // The KTX file format has an exception for plain cubemap textures, argh.
                image_size = mip_row_blocks * mip_col_blocks * m_bytes_per_block;
            }
            else
            {
                image_size = mip_depth * mip_row_blocks * mip_col_blocks * m_bytes_per_block * get_array_size() * get_num_faces();
            }
        }
        else
        {
            if (serializer.read(&image_size, 1, sizeof(image_size)) != sizeof(image_size))
                return false;
            if (m_opposite_endianness)
                image_size = utils::swap32(image_size);
        }

        if (!image_size)
            return false;

        uint32_t total_mip_size = 0;

        // The KTX file format has an exception for plain cubemap textures, argh.
        if ((!m_header.m_numberOfArrayElements) && (get_num_faces() == 6))
        {
            // plain non-array cubemap: image_size is ONE face's size here
            for (uint32_t face = 0; face < get_num_faces(); face++)
            {
                VOGL_ASSERT(m_image_data.size() == get_image_index(mip_level, 0, face, 0));
                m_image_data.push_back(uint8_vec());
                uint8_vec &image_data = m_image_data.back();

                image_data.resize(image_size);
                if (serializer.read(&image_data[0], 1, image_size) != image_size)
                    return false;

                if (m_opposite_endianness)
                    utils::endian_swap_mem(&image_data[0], image_size, m_header.m_glTypeSize);

                uint32_t num_cube_pad_bytes = disable_mip_and_cubemap_padding ? 0 : (3 - ((image_size + 3) % 4));
                if (serializer.read(pad_bytes, 1, num_cube_pad_bytes) != num_cube_pad_bytes)
                    return false;

                total_mip_size += image_size + num_cube_pad_bytes;
            }
        }
        else
        {
            // Budget of bytes this mip level may consume, per the imageSize field.
            uint32_t num_image_bytes_remaining = image_size;

            // 1D, 2D, 3D (normal or array texture), or array cubemap
            for (uint32_t array_element = 0; array_element < get_array_size(); array_element++)
            {
                for (uint32_t face = 0; face < get_num_faces(); face++)
                {
                    for (uint32_t zslice = 0; zslice < mip_depth; zslice++)
                    {
                        uint32_t slice_size = mip_row_blocks * mip_col_blocks * m_bytes_per_block;
                        if ((!slice_size) || (slice_size > num_image_bytes_remaining))
                            return false;

                        uint32_t image_index = get_image_index(mip_level, array_element, face, zslice);
                        m_image_data.ensure_element_is_valid(image_index);

                        uint8_vec &image_data = m_image_data[image_index];
                        image_data.resize(slice_size);

                        if (serializer.read(&image_data[0], 1, slice_size) != slice_size)
                            return false;

                        if (m_opposite_endianness)
                            utils::endian_swap_mem(&image_data[0], slice_size, m_header.m_glTypeSize);

                        num_image_bytes_remaining -= slice_size;

                        total_mip_size += slice_size;
                    }
                }
            }

            // Leftover budget means imageSize disagreed with the slice layout.
            if (num_image_bytes_remaining)
            {
                VOGL_ASSERT_ALWAYS;
                return false;
            }
        }

        // Skip the mip-level padding to the next 4-byte boundary.
        uint32_t num_mip_pad_bytes = disable_mip_and_cubemap_padding ? 0 : (3 - ((total_mip_size + 3) % 4));
        if (serializer.read(pad_bytes, 1, num_mip_pad_bytes) != num_mip_pad_bytes)
            return false;
    }
    return true;
}
// Deep-ish equality comparison of two KTX textures: compares the public
// attributes (format, dimensions, mips, faces, etc.), the key/value
// metadata (order-insensitively), and every image's raw bytes.
// Returns true if the two textures are equivalent.
bool ktx_texture::operator==(const ktx_texture &rhs) const
{
    if (this == &rhs)
        return true;

    // This is not super deep because I want to avoid poking around into internal state (such as the header)
#define CMP(x)      \
    if (x != rhs.x) \
        return false;
    CMP(get_ogl_internal_fmt());
    CMP(get_width());
    CMP(get_height());
    CMP(get_depth());
    CMP(get_num_mips());
    CMP(get_array_size());
    CMP(get_num_faces());
    CMP(is_compressed());
    CMP(get_block_dim());

    // The image fmt/type shouldn't matter with compressed textures.
    if (!is_compressed())
    {
        CMP(get_ogl_fmt());
        CMP(get_ogl_type());
    }

    CMP(get_total_images());
    CMP(get_opposite_endianness());

    // Do an order insensitive key/value comparison.
    dynamic_string_array lhs_keys;
    get_keys(lhs_keys);

    dynamic_string_array rhs_keys;
    rhs.get_keys(rhs_keys);

    if (lhs_keys.size() != rhs_keys.size())
        return false;

    lhs_keys.sort(dynamic_string_less_than_case_sensitive());
    rhs_keys.sort(dynamic_string_less_than_case_sensitive());

    // Both sorted key sets must match name for name.
    for (uint32_t i = 0; i < lhs_keys.size(); i++)
        if (lhs_keys[i].compare(rhs_keys[i], true) != 0)
            return false;

    for (uint32_t i = 0; i < lhs_keys.size(); i++)
    {
        uint8_vec lhs_data, rhs_data;
        if (!get_key_value_data(lhs_keys[i].get_ptr(), lhs_data))
            return false;
        // Bug fix: the value must be fetched from rhs, not from *this twice --
        // the old code compared lhs_data against itself, so differing values
        // for the same key were never detected.
        if (!rhs.get_key_value_data(lhs_keys[i].get_ptr(), rhs_data))
            return false;
        if (lhs_data != rhs_data)
            return false;
    }

    // Compare images, mip by mip.
    // NOTE(review): the zslice loop runs over get_depth() (mip 0 depth) for
    // every mip level rather than the mip-adjusted depth; preserved as-is --
    // confirm get_image_data() tolerates this for 3D textures.
    for (uint32_t l = 0; l < get_num_mips(); l++)
    {
        for (uint32_t a = 0; a < get_array_size(); a++)
        {
            for (uint32_t f = 0; f < get_num_faces(); f++)
            {
                for (uint32_t z = 0; z < get_depth(); z++)
                {
                    const uint8_vec &lhs_img = get_image_data(l, a, f, z);
                    const uint8_vec &rhs_img = rhs.get_image_data(l, a, f, z);

                    if (lhs_img != rhs_img)
                        return false;
                }
            }
        }
    }
#undef CMP
    return true;
}