bool vtTin::_ReadTinBody(FILE *fp, bool progress_callback(int)) { fseek(fp, m_file_data_start, SEEK_SET); // pre-allocate for efficiency m_vert.SetMaxSize(m_file_verts); m_tri.reserve(m_file_tris * 3); // read verts DPoint2 p; float z; for (int i = 0; i < m_file_verts; i++) { if (progress_callback != NULL && (i % 1024) == 0) progress_callback(i * 49 / m_file_verts); fread(&p.x, 8, 2, fp); // 2 doubles fread(&z, 4, 1, fp); // 1 float AddVert(p, z); } // read tris int tribuf[3]; for (int i = 0; i < m_file_tris; i++) { if (progress_callback != NULL && (i % 1024) == 0) progress_callback(50 + i * 49 / m_file_tris); fread(tribuf, 4, 3, fp); // 3 ints AddTri(tribuf[0], tribuf[1], tribuf[2]); } return true; }
/** * Write the TIN to a TIN (.itf) file (VTP-defined format). */ bool vtTin::Write(const char *fname, bool progress_callback(int)) const { FILE *fp = vtFileOpen(fname, "wb"); if (!fp) return false; char *wkt; OGRErr err = m_proj.exportToWkt(&wkt); if (err != OGRERR_NONE) { fclose(fp); return false; } int proj_len = strlen(wkt); int data_start = 5 + 4 + 4 + 4 + + 4 + proj_len + 32 + 4 + 4; int i; int verts = NumVerts(); int tris = NumTris(); fwrite("tin02", 5, 1, fp); // version 2 fwrite(&verts, 4, 1, fp); fwrite(&tris, 4, 1, fp); fwrite(&data_start, 4, 1, fp); fwrite(&proj_len, 4, 1, fp); fwrite(wkt, proj_len, 1, fp); OGRFree(wkt); // version 2 of the format has extents: left, top, right, bottom, min z, max h fwrite(&m_EarthExtents.left, sizeof(double), 4, fp); fwrite(&m_fMinHeight, sizeof(float), 1, fp); fwrite(&m_fMaxHeight, sizeof(float), 1, fp); // room for future extention: you can add fields here, as long as you // increase the data_start offset above accordingly // count progress int count = 0, total = verts + tris; // write verts for (i = 0; i < verts; i++) { fwrite(&m_vert[i].x, 8, 2, fp); // 2 doubles fwrite(&m_z[i], 4, 1, fp); // 1 float if (progress_callback && (++count % 100) == 0) progress_callback(count * 99 / total); } // write tris for (i = 0; i < tris; i++) { fwrite(&m_tri[i*3], 4, 3, fp); // 3 ints if (progress_callback && (++count % 100) == 0) progress_callback(count * 99 / total); } fclose(fp); return true; }
/** * Write the TIN to a Wavefront OBJ file. Note that we write X and Y as * geographic coordinates, but OBJ only supports single-precision floating * point values, so it may lose some precision. */ bool vtTin::WriteOBJ(const char *fname, bool progress_callback(int)) const { FILE *fp = vtFileOpen(fname, "wb"); if (!fp) return false; int i, count = 0; const int verts = NumVerts(); const int tris = NumTris(); const int total = verts + tris; fprintf(fp, "####\n"); fprintf(fp, "#\n"); fprintf(fp, "# OBJ File Generated by VTBuilder\n"); fprintf(fp, "#\n"); fprintf(fp, "####\n"); fprintf(fp, "# Object %s\n", fname); fprintf(fp, "#\n"); fprintf(fp, "# Vertices: %d\n", verts); fprintf(fp, "# Faces: %d\n", tris); fprintf(fp, "#\n"); fprintf(fp, "####\n"); // write verts for (i = 0; i < verts; i++) { fprintf(fp, "v %lf %lf %f\n", m_vert[i].x, m_vert[i].y, m_z[i]); if (progress_callback && (++count % 200) == 0) progress_callback(count * 99 / total); } fprintf(fp, "# %d vertices, 0 vertices normals\n", verts); fprintf(fp, "\n"); // write tris for (i = 0; i < tris; i++) { // Here is triangle definition (zero based) A B C ... // the indices in the file are 1-based, so add 1 fprintf(fp, "f %d %d %d\n", m_tri[i*3+0]+1, m_tri[i*3+1]+1, m_tri[i*3+2]+1); if (progress_callback && (++count % 200) == 0) progress_callback(count * 99 / total); } fprintf(fp, "# %d faces, 0 coords texture\n", tris); fprintf(fp, "\n"); fprintf(fp, "# End of File\n"); fclose(fp); return true; }
void CNodeDefManager::updateTextures(IGameDef *gamedef,
	void (*progress_callback)(void *progress_args, u32 progress, u32 max_progress),
	void *progress_callback_args)
{
#ifndef SERVER
	infostream << "CNodeDefManager::updateTextures(): Updating "
		"textures in node definitions" << std::endl;

	// Gather the client-side sources needed to (re)build node visuals.
	Client *client = (Client *)gamedef;
	ITextureSource *texture_src = client->tsrc();
	IShaderSource *shader_src = client->getShaderSource();
	scene::IMeshManipulator *mesh_manip =
		RenderingEngine::get_scene_manager()->getMeshManipulator();

	TextureSettings tsettings;
	tsettings.readSettings();

	// Update every registered node's textures, reporting progress per node.
	const u32 feature_count = m_content_features.size();
	for (u32 idx = 0; idx < feature_count; idx++) {
		ContentFeatures &feat = m_content_features[idx];
		feat.updateTextures(texture_src, shader_src, mesh_manip, client, tsettings);
		progress_callback(progress_callback_args, idx, feature_count);
	}
#endif
}
/** Expects a rows for each data point seperated by semicolons, in order of increasing z,y,x with x cycling fastest. */ int Dataset::read_channel5(const char name[]) { FILE *fi; char line[LINE_BUFFER_SIZE]; char *buff = NULL; EULER e; int i = 0, j = 0, k = 0; int pf = nv/50; Point *c; if ((fi = fopen(name,"r")) == NULL) { return 0; } else { //Ignore header line fgets(line, LINE_BUFFER_SIZE, fi); data = (Point*) malloc(sizeof(Point)*nv); if (data == NULL) { return 0; } c = data; //Cycle through values while ( i++ < nv ) { allow_py_threading; if (fgets(line, LINE_BUFFER_SIZE, fi) == NULL ) { break; } block_py_threading; buff = strtok(line,";"); for (j=EULER_COL;j--;) buff = strtok(NULL,";"); e[0] = strtod(buff,&buff); e[1] = strtod(buff+3,&buff); e[2] = strtod(buff+3,&buff); c->mad = strtod(buff+3,&buff); c->bc = strtod(buff+3,&buff); c->grain = -1; c->external = false; c->orientation = Orientation(e[0],e[1],e[2]); c->orientation.reduce_zone(); c++; if (i % pf == 0) { progress_callback((float)i/nv, "Reading file", false); //callback->progress((float)i/nv); } } fclose(fi); return 1; } }
/** * Write the TIN to a Stanford Polygon File Format (PLY), * http://en.wikipedia.org/wiki/PLY_(file_format) */ bool vtTin::WritePLY(const char *fname, bool progress_callback(int)) const { FILE *fp = vtFileOpen(fname, "wb"); if (!fp) return false; int i, count = 0; int verts = NumVerts(); int tris = NumTris(); int total = verts + tris; fprintf(fp, "ply\n"); fprintf(fp, "format ascii 1.0\n"); fprintf(fp, "comment VTBuilder generated\n"); fprintf(fp, "element vertex %d\n", verts); fprintf(fp, "property float x\n"); fprintf(fp, "property float y\n"); fprintf(fp, "property float z\n"); fprintf(fp, "element face %d\n", tris); fprintf(fp, "property list uchar int vertex_indices\n"); fprintf(fp, "end_header\n"); // write verts for (i = 0; i < verts; i++) { fprintf(fp, "%lf %lf %f\n", m_vert[i].x, m_vert[i].y, m_z[i]); if (progress_callback && (++count % 200) == 0) progress_callback(count * 99 / total); } // write tris for (i = 0; i < tris; i++) { // Here is triangle definition (zero based) A B C ... fprintf(fp, "3 %d %d %d\n", m_tri[i*3+0], m_tri[i*3+1], m_tri[i*3+2]); if (progress_callback && (++count % 200) == 0) progress_callback(count * 99 / total); } fclose(fp); return true; }
// passphrase must be at most 256 characters or code may crash void mnemonic_to_seed(const char *mnemonic, const char *passphrase, uint8_t seed[512 / 8], void (*progress_callback)(uint32_t current, uint32_t total)) { int passphraselen = strlen(passphrase); #if USE_BIP39_CACHE int mnemoniclen = strlen(mnemonic); // check cache if (mnemoniclen < 256 && passphraselen < 64) { for (int i = 0; i < BIP39_CACHE_SIZE; i++) { if (!bip39_cache[i].set) continue; if (strcmp(bip39_cache[i].mnemonic, mnemonic) != 0) continue; if (strcmp(bip39_cache[i].passphrase, passphrase) != 0) continue; // found the correct entry memcpy(seed, bip39_cache[i].seed, 512 / 8); return; } } #endif uint8_t salt[8 + 256]; memcpy(salt, "mnemonic", 8); memcpy(salt + 8, passphrase, passphraselen); PBKDF2_HMAC_SHA512_CTX pctx; pbkdf2_hmac_sha512_Init(&pctx, (const uint8_t *)mnemonic, strlen(mnemonic), salt, passphraselen + 8); if (progress_callback) { progress_callback(0, BIP39_PBKDF2_ROUNDS); } for (int i = 0; i < 8; i++) { pbkdf2_hmac_sha512_Update(&pctx, BIP39_PBKDF2_ROUNDS / 8); if (progress_callback) { progress_callback((i + 1) * BIP39_PBKDF2_ROUNDS / 8, BIP39_PBKDF2_ROUNDS); } } pbkdf2_hmac_sha512_Final(&pctx, seed); #if USE_BIP39_CACHE // store to cache if (mnemoniclen < 256 && passphraselen < 64) { bip39_cache[bip39_cache_index].set = true; strcpy(bip39_cache[bip39_cache_index].mnemonic, mnemonic); strcpy(bip39_cache[bip39_cache_index].passphrase, passphrase); memcpy(bip39_cache[bip39_cache_index].seed, seed, 512 / 8); bip39_cache_index = (bip39_cache_index + 1) % BIP39_CACHE_SIZE; } #endif }
// Add a cement "foundation" level beneath each building whose footprint
// spans more than 50cm of terrain relief.  If any structures are selected,
// only selected ones are considered.  Returns the number of foundations added.
int vtStructureArray::AddFoundations(vtHeightField *pHF, bool progress_callback(int))
{
	int built = 0;
	const int selected = NumSelected();
	const int size = GetSize();
	VTLOG("AddFoundations: %d selected, %d total, ", selected, size);

	for (int i = 0; i < size; i++)
	{
		if (progress_callback != NULL)
			progress_callback(i * 99 / size);

		vtStructure *str = GetAt(i);
		vtBuilding *bld = str->GetBuilding();
		if (!bld)
			continue;
		// When a selection exists, restrict to the selected structures.
		if (selected > 0 && !str->IsSelected())
			continue;

		// Get the outer footprint of the lowest level.
		vtLevel *pLev = bld->GetLevel(0);
		const DLine2 &foot = pLev->GetOuterFootprint();
		const int pts = foot.GetSize();

		// Measure the terrain's elevation range under the footprint.
		float fMin = 1E9, fMax = -1E9;
		for (int j = 0; j < pts; j++)
		{
			float fElev;
			pHF->FindAltitudeOnEarth(foot.GetAt(j), fElev);
			if (fElev < fMin)
				fMin = fElev;
			if (fElev > fMax)
				fMax = fElev;
		}
		const float fDiff = fMax - fMin;

		// If there's less than 50cm of depth, don't bother building a foundation.
		if (fDiff < 0.5f)
			continue;

		// Create and insert a one-story foundation level below the building.
		vtLevel *pNewLev = new vtLevel;
		pNewLev->m_iStories = 1;
		pNewLev->m_fStoryHeight = fDiff;
		bld->InsertLevel(0, pNewLev);
		bld->SetFootprint(0, foot);
		pNewLev->SetEdgeMaterial(BMAT_NAME_CEMENT);
		pNewLev->SetEdgeColor(RGBi(255, 255, 255));
		built++;
	}
	VTLOG("%d added.\n", built);
	return built;
}
/** * Use the height data in the grid and a colormap fill a bitmap with colors. * Any undefined heixels in the source will be fill with red (255,0,0). * * \param pBM The bitmap to be colored. * \param color_map A ColorMap which has already had GenerateColorTable() called. * \param nodata The color to use for NODATA areas, where there are no elevation values. * \param progress_callback If supplied, this function will be called back * with a value of 0 to 100 as the operation progresses. * * \return true if any invalid elevation values were encountered. */ bool vtHeightFieldGrid3d::ColorDibFromTable(vtBitmapBase *pBM, const ColorMap *color_map, const RGBAi &nodata, bool progress_callback(int)) const { VTLOG1(" ColorDibFromTable:"); const IPoint2 bitmap_size = pBM->GetSize(); int depth = pBM->GetDepth(); VTLOG(" dib size %d x %d, grid %d x %d.. ", bitmap_size.x, bitmap_size.y, m_iSize.x, m_iSize.y); const bool bExact = (bitmap_size == m_iSize); double ratiox = (double)(m_iSize.x - 1)/(bitmap_size.x - 1), ratioy = (double)(m_iSize.y - 1)/(bitmap_size.y - 1); bool has_invalid = false; const RGBi nodata_24bit(nodata.r, nodata.g, nodata.b); float elev; // now iterate over the texels for (int i = 0; i < bitmap_size.x; i++) { if (progress_callback != NULL && (i&40) == 0) progress_callback(i * 100 / bitmap_size.x); // find the corresponding location in the height grid const double x = i * ratiox; for (int j = 0; j < bitmap_size.y; j++) { const double y = j * ratioy; if (bExact) elev = GetElevation(i, j, true); // Always use true elevation else elev = GetInterpolatedElevation(x, y, true); // Always use true elevation if (elev == INVALID_ELEVATION) { if (depth == 32) pBM->SetPixel32(i, bitmap_size.y - 1 - j, nodata); else pBM->SetPixel24(i, bitmap_size.y - 1 - j, nodata_24bit); has_invalid = true; continue; } const RGBi &rgb = color_map->ColorFromTable(elev); if (depth == 32) pBM->SetPixel32(i, bitmap_size.y - 1 - j, rgb); else pBM->SetPixel24(i, bitmap_size.y - 1 - j, rgb); } 
} VTLOG("Done.\n"); return has_invalid; }
/** * Write the TIN to the GMS format. Historically GMS stood for 'Groundwater * Modeling System' from the EMS-I company, now called Aquaveo. */ bool vtTin::WriteGMS(const char *fname, bool progress_callback(int)) const { FILE *fp = vtFileOpen(fname, "wb"); if (!fp) return false; // first line is file identifier fprintf(fp, "TIN\n"); fprintf(fp, "BEGT\n"); fprintf(fp, "ID 1\n"); // Indices start at 1 //fprintf(fp, "TNAM tin\n"); // "name" of the TIN; optional //fprintf(fp, "MAT 1\n"); // "TIN material ID"; optional int count = 0; const int verts = NumVerts(); const int tris = NumTris(); const int total = verts + tris; // write verts fprintf(fp, "VERT %d\n", verts); for (int i = 0; i < verts; i++) { fprintf(fp, "%lf %lf %f\n", m_vert[i].x, m_vert[i].y, m_z[i]); if (progress_callback && (++count % 200) == 0) progress_callback(count * 99 / total); } // write tris fprintf(fp, "TRI %d\n", tris); for (int i = 0; i < tris; i++) { // the indices in the file are 1-based, so add 1 fprintf(fp, "%d %d %d\n", m_tri[i*3+0]+1, m_tri[i*3+1]+1, m_tri[i*3+2]+1); if (progress_callback && (++count % 200) == 0) progress_callback(count * 99 / total); } fprintf(fp, "ENDT\n"); fclose(fp); return true; }
// Clone the repository at 'url' into 'localPath', reporting progress back
// to Java via the static progressCallback method on 'cls'.
// Returns 0 on success, a negative libgit2/JNI error code on failure.
JNIEXPORT jint JNICALL Java_com_thingsbook_it_NativeGit_doClone
  (JNIEnv * env, jclass cls, jstring url, jstring localPath)
{
	git_threads_init();

	// set variables to allow calling java function from here
	mid_callback = (*env)->GetStaticMethodID(env, cls, "progressCallback", sigStr);
	if (mid_callback == 0) {
		LOGD("No method ID found for callback function");
		return -1;	// was a bare 'return;' in a jint-returning function
	}
	this_env = env;
	java_class = cls;

	progress_data pd = {{0}};
	git_repository *cloned_repo = NULL;
	git_clone_options clone_opts = GIT_CLONE_OPTIONS_INIT;
	git_checkout_options checkout_opts = GIT_CHECKOUT_OPTIONS_INIT;

	// Set up options
	checkout_opts.checkout_strategy = GIT_CHECKOUT_SAFE_CREATE;
	checkout_opts.progress_cb = checkout_progress;
	checkout_opts.progress_payload = &pd;
	clone_opts.checkout_opts = checkout_opts;
	clone_opts.remote_callbacks.transfer_progress = &fetch_progress;
	clone_opts.remote_callbacks.payload = &pd;

	LOGD("cloning repository ...");
	progress_callback("getting product information ...", 0);

	const char *c_url = (*env)->GetStringUTFChars(env, url, NULL);
	const char *c_local_path = (*env)->GetStringUTFChars(env, localPath, NULL);
	LOGD("%s", c_url);
	LOGD("%s", c_local_path);

	// actually call library to clone repository; the result was previously discarded
	int error = git_clone(&cloned_repo, c_url, c_local_path, &clone_opts);

	// Release the JNI string copies (previously leaked).
	(*env)->ReleaseStringUTFChars(env, url, c_url);
	(*env)->ReleaseStringUTFChars(env, localPath, c_local_path);

	if (error != 0) {
		LOGD("cloning repository failed: %d", error);
	}
	else if (cloned_repo) {
		git_repository_free(cloned_repo);
	}
	return error;
}
void Countries::Free(bool progress_callback(int)) { // manually free memory uint size = m_countries.GetSize(); for (int unsigned i = 0; i < size; i++) { if (progress_callback != NULL) progress_callback(i * 100 / size); Country *country = m_countries[i]; delete country; } m_countries.SetSize(0); }
/**
 * If you are going to do a large number of height-testing of this TIN
 * (with FindAltitudeOnEarth), call this method once first to set up a
 * series of indexing bins which greatly speed up testing.
 *
 * \param bins Number of bins per dimension, e.g. a value of 50 produces
 *		50*50=2500 bins.  More bins produces faster height-testing with
 *		the only tradeoff being a small amount of RAM per bin.
 * \param progress_callback If supplied, this function will be called back
 *		with a value of 0 to 100 as the operation progresses.
 */
void vtTin::SetupTriangleBins(int bins, bool progress_callback(int))
{
	// Bin grid covers the TIN's earth extents, 'bins' cells per axis.
	DRECT rect = m_EarthExtents;
	m_BinSize.x = rect.Width() / bins;
	m_BinSize.y = rect.Height() / bins;

	// Replace any previously built bin structure.
	delete m_trianglebins;
	m_trianglebins = new BinArray(bins, bins);

	uint tris = NumTris();
	for (uint i = 0; i < tris; i++)
	{
		if ((i%100)==0 && progress_callback)
			progress_callback(i * 100 / tris);

		// get 2D points of this triangle's three corners
		const DPoint2 &p1 = m_vert[m_tri[i*3]];
		const DPoint2 &p2 = m_vert[m_tri[i*3+1]];
		const DPoint2 &p3 = m_vert[m_tri[i*3+2]];

		// find the correct range of bins: the triangle's index is added to
		// every bin overlapped by its 2D bounding box
		DPoint2 fminrange, fmaxrange;
		fminrange.x = std::min(std::min(p1.x, p2.x), p3.x);
		fmaxrange.x = std::max(std::max(p1.x, p2.x), p3.x);
		fminrange.y = std::min(std::min(p1.y, p2.y), p3.y);
		fmaxrange.y = std::max(std::max(p1.y, p2.y), p3.y);

		IPoint2 bin_start, bin_end;
		bin_start.x = (uint) ((fminrange.x-rect.left) / m_BinSize.x);
		bin_end.x = (uint) ((fmaxrange.x-rect.left) / m_BinSize.x);
		bin_start.y = (uint) ((fminrange.y-rect.bottom) / m_BinSize.y);
		bin_end.y = (uint) ((fmaxrange.y-rect.bottom) / m_BinSize.y);
		for (int j = bin_start.x; j <= bin_end.x; j++)
		{
			for (int k = bin_start.y; k <= bin_end.y; k++)
			{
				// NOTE(review): GetBin appears to return NULL for
				// out-of-range coordinates (e.g. a point exactly on the
				// right/top extent edge) — confirm in BinArray.
				Bin *bin = m_trianglebins->GetBin(j, k);
				if (bin)
					bin->push_back(i);
			}
		}
	}
}
void CNodeDefManager::updateTextures(IGameDef *gamedef,
	void (*progress_callback)(void *progress_args, u32 progress, u32 max_progress),
	void *progress_callback_args)
{
#ifndef SERVER
	infostream << "CNodeDefManager::updateTextures(): Updating "
		"textures in node definitions" << std::endl;

	// Gather the sources needed to (re)build node visuals.
	ITextureSource *texture_src = gamedef->tsrc();
	IShaderSource *shader_src = gamedef->getShaderSource();
	scene::ISceneManager *scene_mgr = gamedef->getSceneManager();
	scene::IMeshManipulator *mesh_manip = scene_mgr->getMeshManipulator();

	TextureSettings tsettings;
	tsettings.readSettings();

	// Update every registered node definition, reporting progress per node.
	const u32 feature_count = m_content_features.size();
	for (u32 idx = 0; idx < feature_count; idx++) {
		m_content_features[idx].updateTextures(texture_src, shader_src,
			scene_mgr, mesh_manip, gamedef, tsettings);
		progress_callback(progress_callback_args, idx, feature_count);
	}
#endif
}
bool Countries::ReadGCF(const char *fname, bool progress_callback(int)) { FILE *fp = vtFileOpen(fname, "rb"); if (!fp) return false; int num; fread(&num, sizeof(int), 1, fp); m_countries.SetMaxSize(num); int i, j, num_places; for (i = 0; i < num; i++) { if (progress_callback != NULL) progress_callback(i * 100 / num); Country *country = new Country; m_countries.Append(country); ReadString(fp, country->m_full); printf("Reading %s...\n", (const char *) country->m_full); fread(&num_places, sizeof(int), 1, fp); country->m_places.SetMaxSize(num_places); for (j = 0; j < num_places; j++) { Place *place = new Place; fread(&place->m_pos.x, sizeof(double), 2, fp); ReadString(fp, place->m_fullname_nd); country->m_places.Append(place); } } fclose(fp); return true; }
/* Combine fetch (network + index) and checkout progress into one percentage
 * and forward a short status string to the Java-side progress callback. */
static void print_progress(const progress_data *pd)
{
	/* Guard all ratios against zero denominators: total_objects is 0 before
	 * the remote has reported anything, which previously divided by zero.
	 * (total_steps was already guarded.) */
	int network_percent = pd->fetch_progress.total_objects > 0 ?
		(100*pd->fetch_progress.received_objects) / pd->fetch_progress.total_objects : 0;
	int index_percent = pd->fetch_progress.total_objects > 0 ?
		(100*pd->fetch_progress.indexed_objects) / pd->fetch_progress.total_objects : 0;
	int checkout_percent = pd->total_steps > 0
		? (100 * pd->completed_steps) / pd->total_steps
		: 0;
	float progress_percent = (network_percent + index_percent + checkout_percent) / 3.0;
	int kbytes = pd->fetch_progress.received_bytes / 1024;

	/* Buffer enlarged (was 50) and sprintf replaced with snprintf so large
	 * object counts cannot overflow the stack buffer. */
	char output[80];

	LOGD("progress percentage: %.2f", progress_percent);

	if (pd->fetch_progress.received_objects == pd->fetch_progress.total_objects) {
		snprintf(output, sizeof(output), "Resolving deltas %d/%d\r",
			pd->fetch_progress.indexed_deltas,
			pd->fetch_progress.total_deltas);
	} else {
		snprintf(output, sizeof(output), "net %3d%% (%4d kb, %5d/%5d)",
			network_percent, kbytes,
			pd->fetch_progress.received_objects,
			pd->fetch_progress.total_objects);
	}
	progress_callback(output, progress_percent);
}
/* Core code contributed by Kevin Behilo, 2/20/04.
 *
 * Possible TODO: add code to soften and blend shadow edges
 * (see aliasing comments in source).
 *
 * Definite TODO: the whole thing can be sped up by precalculating the
 * surface normals once. In fact that should be placed in a separate Shading
 * Context, so that it could be re-used for quickly re-shading multiple times.
 */
// Cast shadows onto the bitmap from the given light direction: a first pass
// marches a ray from each texel along the light direction and darkens every
// texel the terrain occludes; a second pass applies dot-product lighting to
// all texels left unshadowed.
void vtHeightFieldGrid3d::ShadowCastDib(vtBitmapBase *pBM, const FPoint3 &light_dir,
	float fLightFactor, float fAmbient, bool progress_callback(int)) const
{
	const IPoint2 bitmap_size = pBM->GetSize();

	// Compute area that we will sample for shading, bounded by the texel
	// centers, which are 1/2 texel in from the grid extents.
	const DPoint2 texel_size(m_EarthExtents.Width() / bitmap_size.x,
		m_EarthExtents.Height() / bitmap_size.y);
	DRECT texel_area = m_EarthExtents;
	texel_area.Grow(-texel_size.x/2, -texel_size.y/2);
	const DPoint2 texel_base(texel_area.left, texel_area.bottom);

	const bool b8bit = (pBM->GetDepth() == 8);

	// These values are hardcoded here but could be exposed in the GUI
	const float sun = 0.7f;

	// If we have light that's pointing UP, rather than down at the terrain,
	// then it's only going to take a really long time to produce a
	// completely dark terrain. We can catch this case up front.
	if (light_dir.y > 0)
	{
		for (int i = 0; i < bitmap_size.x; i++)
		{
			for (int j = 0; j < bitmap_size.y; j++)
			{
				if (b8bit)
					pBM->ScalePixel8(i, j, fAmbient);
				else
					pBM->ScalePixel24(i, j, fAmbient);
			}
		}
		return;
	}

	// Create array to hold flags
	LightMap lightmap(bitmap_size.x, bitmap_size.y);

	// This factor is used when applying shading to non-shadowed areas to
	// try and keep the "contrast" down to a min. (still get "patches" of
	// dark/light spots though).
	// It is initialized to 1.0, because in case there are no shadows at all
	// (such as at noon) we still need a reasonable value.
	float darkest_shadow = 1.0;

	// For the vector used to cast shadows, we need it in grid coordinates,
	// which are (Column,Row) where Row is north. But the direction passed
	// in uses OpenGL coordinates where Z is south. So flip Z.
	FPoint3 grid_light_dir = light_dir;
	grid_light_dir.z = -grid_light_dir.z;

	// Scale the light vector such that the X or Z component (whichever is
	// larger) is 1. This is will serve as our direction vector in grid
	// coordinates, when drawing a line across the grid to cast the shadow.
	//
	// Code adapted from aaron_torpy:
	// http://www.geocities.com/aaron_torpy/algorithms.htm
	//
	float f, HScale;
	if ( fabs(grid_light_dir.x) > fabs(grid_light_dir.z) )
	{
		HScale = m_fStep.x;
		f = fabs(light_dir.x);
	}
	else
	{
		HScale = m_fStep.y;
		f = fabs(light_dir.z);
	}
	grid_light_dir /= f;

	// Choose traversal direction so we walk away from the light on each axis.
	int i_init, i_final, i_incr;
	int j_init, j_final, j_incr;
	if (grid_light_dir.x > 0)
	{
		i_init=0;
		i_final=bitmap_size.x;
		i_incr=1;
	}
	else
	{
		i_init=bitmap_size.x-1;
		i_final=-1;
		i_incr=-1;
	}
	if (grid_light_dir.z > 0)
	{
		j_init=0;
		j_final=bitmap_size.y;
		j_incr=1;
	}
	else
	{
		j_init=bitmap_size.y-1;
		j_final=-1;
		j_incr=-1;
	}

	// First pass: find each point that it is in shadow.
	DPoint2 pos;
	float shadowheight, elevation;
	FPoint3 normal;
	FPoint3 p3;
	int x, z;
	float shade;

	for (int j = j_init; j != j_final; j += j_incr)
	{
		if (progress_callback != NULL && (j%20) == 0)
			progress_callback(abs(j-j_init) * 100 / bitmap_size.y);

		for (int i = i_init; i != i_final; i += i_incr)
		{
			pos = GridPos(texel_base, texel_size, i, j);
			FindAltitudeOnEarth(pos, shadowheight, true);

			if (shadowheight == INVALID_ELEVATION)
			{
				// set a flag so we won't visit this one again
				lightmap.Set(i, j, 1);
				continue;
			}

			// March along the light ray from this texel, shadowing texels
			// until the ray exits the grid or passes under the terrain.
			bool Under_Out = false;
			for (int k = 1; Under_Out == false; k++)
			{
				x = (int) (i + grid_light_dir.x*k + 0.5f);
				z = (int) (j + grid_light_dir.z*k + 0.5f);
				shadowheight += grid_light_dir.y * HScale;

				if ((x<0) || (x>bitmap_size.x-1) || (z<0) || (z>bitmap_size.y-1))
				{
					Under_Out = true;	// Out of the grid
					break;
				}

				pos = GridPos(texel_base, texel_size, x, z);
				FindAltitudeOnEarth(pos, elevation, true);

				// skip holes in the grid
				if (elevation == INVALID_ELEVATION)
					continue;

				if (elevation > shadowheight)
				{
					if (k>1)
						Under_Out = true;	// Under the terrain
					break;
				}

				// Combine color and shading.
				// Only do shadow if we have not shaded this i,j before.
				if (lightmap.Get(x,z) < 1)
				{
					// 3D elevation query to get slope
					m_LocalCS.EarthToLocal(pos, p3.x, p3.z);
					FindAltitudeAtPoint(p3, p3.y, true, 0, &normal);

					//*****************************************
					// Here the Sun(r, g, b) = 0 because we are in the shade
					// therefore I(r, g, b) = Amb(r, g, b) * (0.5*N[z] + 0.5)
					//
					shade = fAmbient * (0.5f*normal.y + 0.5f);
					//*****************************************
					//*****************************************
					if (darkest_shadow > shade)
						darkest_shadow = shade;

					// Rather than doing the shading at this point we may want to
					// simply save the value into the LightMap array. Then apply
					// some anti-aliasing or edge softening algorithm to the LightMap.
					// Once that's done, apply the whole LightMap to the DIB.
					if (b8bit)
						pBM->ScalePixel8(x, bitmap_size.y-1-z, shade);
					else
						pBM->ScalePixel24(x, bitmap_size.y-1-z, shade);

					// set a flag to show that this texel has been shaded.
					// (or set to value of the shading - see comment above)
					lightmap.Set(x, z, lightmap.Get(x, z)+1);
				}
			}
		} //for i
	} //for j

	// For dot-product lighting, we use the normal 3D vector, only inverted
	// so that we can compare it to the upward-pointing ground normals.
	const FPoint3 inv_light_dir = -light_dir;

	// Second pass. Now we are going to loop through the LightMap and apply
	// the full lighting formula to each texel that has not been shaded yet.
	for (int j = 0; j < bitmap_size.y; j++)
	{
		if (progress_callback != NULL && (j%20) == 0)
			progress_callback(j * 100 / bitmap_size.y);

		for (int i = 0; i < bitmap_size.x; i++)
		{
			if (lightmap.Get(i, j) > 0)
				continue;

			pos = GridPos(texel_base, texel_size, i, j);

			// 2D elevation query to check for holes in the grid
			FindAltitudeOnEarth(pos, elevation, true);
			if (elevation == INVALID_ELEVATION)
				continue;

			// 3D elevation query to get slope
			m_LocalCS.EarthToLocal(pos, p3.x, p3.z);
			FindAltitudeAtPoint(p3, p3.y, true, 0, &normal);

			//*****************************************
			//*****************************************
			//shade formula based on:
			//http://www.geocities.com/aaron_torpy/algorithms.htm#calc_intensity
			// The Amb value was arbitrarily chosen
			// Need to experiment more to determine the best value
			// Perhaps calculating Sun(r, g, b) and Amb(r, g, b) for a
			// given time of day (e.g. warmer colors close to sunset)
			// or give control to user since textures will differ
			// I(r, g, b) = Sun(r, g, b) * scalarprod(N, v) + Amb(r, g, b) * (0.5*N[z] + 0.5)
			shade = sun * normal.Dot(inv_light_dir);

			// It's a reasonable assuption that an angle of 45 degrees is
			// sufficient to fully illuminate the ground.
			shade /= .7071f;

			// Now add ambient component
			shade += fAmbient * (0.5f*normal.y + 0.5f);

			// Maybe clipping values can be exposed to the user as well.
			// Clip - don't shade down below lowest ambient level
			if (shade < darkest_shadow)
				shade = darkest_shadow;
			else if (shade > 1.2f)
				shade = 1.2f;

			// Push the value of 'shade' toward 1.0 by the fLightFactor factor.
			// This means that fLightFactor=0 means no lighting, 1 means full lighting.
			float diff = 1 - shade;
			diff = diff * (1 - fLightFactor);
			shade += diff;

			// Rather than doing the shading at this point we may want to
			// simply save the value into the LightMap array. Then apply
			// some anti-aliasing or edge softening algorithm to the LightMap.
			// Once that's done, apply the whole LightMap to the DIB.
			// LightMap[I][J]= shade; // set to value of the shading - see comment above)
			if (b8bit)
				pBM->ScalePixel8(i, bitmap_size.y-1-j, shade);
			else
				pBM->ScalePixel24(i, bitmap_size.y-1-j, shade);
		}
	}
	// Possible TODO: Apply edge softening algorithm (?)
}
/**
 * Quickly produce a shading-like effect by scanning over the bitmap once,
 * using the east-west slope to produce lightening/darkening.
 * The bitmap must be the same size as the elevation grid, or a power of 2 smaller.
 */
void vtHeightFieldGrid3d::ShadeQuick(vtBitmapBase *pBM, float fLightFactor,
	bool bTrue, bool progress_callback(int))
{
	const IPoint2 bitmap_size = pBM->GetSize();
	const int depth = pBM->GetDepth();

	// Integer ratio of grid posts per bitmap texel on each axis (the bitmap
	// is assumed to divide the grid evenly, per the comment above).
	const int stepx = m_iSize.x / bitmap_size.x;
	const int stepy = m_iSize.y / bitmap_size.y;

	RGBi rgb;
	RGBAi rgba;
	for (int j = 0; j < bitmap_size.y; j++)
	{
		if (progress_callback != NULL && (j%40) == 0)
			progress_callback(j * 100 / bitmap_size.y);

		// find corresponding location in heightfield (bitmap row 0 is the
		// top, grid row 0 is the bottom, hence the flip)
		const int y = m_iSize.y-1 - (j * stepy);
		for (int i = 0; i < bitmap_size.x; i++)
		{
			if (depth == 32)
				pBM->GetPixel32(i, j, rgba);
			else
				pBM->GetPixel24(i, j, rgb);

			// On the last column, step back one post so the east-west
			// difference below doesn't read past the grid edge.
			int x_offset = 0;
			if (i == bitmap_size.x-1)
				x_offset = -1;

			// index into elevation
			const int x = i * stepx;
			float value = GetElevation(x + x_offset, y, bTrue);
			if (value == INVALID_ELEVATION)
			{
				// Do not touch pixels in nodata areas
				continue;
			}
			float value2 = GetElevation(x+1 + x_offset, y, bTrue);
			if (value2 == INVALID_ELEVATION)
				value2 = value;

			// Brightness delta proportional to the east-west slope.
			short diff = (short) ((value2 - value) / m_fStep.x * fLightFactor);

			// clip to keep values under control
			if (diff > 128)
				diff = 128;
			else if (diff < -128)
				diff = -128;
			if (depth == 32)
			{
				// Apply the delta per channel, clamping to 0..255.
				rgba.r += diff;
				rgba.g += diff;
				rgba.b += diff;
				if (rgba.r < 0) rgba.r = 0;
				else if (rgba.r > 255) rgba.r = 255;
				if (rgba.g < 0) rgba.g = 0;
				else if (rgba.g > 255) rgba.g = 255;
				if (rgba.b < 0) rgba.b = 0;
				else if (rgba.b > 255) rgba.b = 255;
				pBM->SetPixel32(i, j, rgba);
			}
			else
			{
				rgb.r = rgb.r + diff;
				rgb.g = rgb.g + diff;
				rgb.b = rgb.b + diff;
				if (rgb.r < 0) rgb.r = 0;
				else if (rgb.r > 255) rgb.r = 255;
				if (rgb.g < 0) rgb.g = 0;
				else if (rgb.g > 255) rgb.g = 255;
				if (rgb.b < 0) rgb.b = 0;
				else if (rgb.b > 255) rgb.b = 255;
				pBM->SetPixel24(i, j, rgb);
			}
		}
	}
}
/**
 * Perform simple shading of a bitmap, based on this grid's elevation values.
 * Lighting is computing using the dot product of the surface normal with
 * the light direction.  This is often called "dot-product lighting".
 *
 * \param pBM The bitmap to shade.
 * \param light_dir Direction vector of the light.
 * \param fLightFactor Value from 0 (no shading) to 1 (full shading)
 * \param fAmbient Ambient light values from 0 to 1, a typical value is 0.1.
 * \param fGamma Gamma values from 0 to 1, values less than 1 boost the brightness curve.
 * \param bTrue If true, use the real elevation values, ignoring vertical exaggeration.
 * \param progress_callback If supplied, will be called with values from 0 to 100.
 */
void vtHeightFieldGrid3d::ShadeDibFromElevation(vtBitmapBase *pBM,
	const FPoint3 &light_dir, float fLightFactor, float fAmbient,
	float fGamma, bool bTrue, bool progress_callback(int)) const
{
	// consider upward-pointing normal vector, rather than downward-pointing
	FPoint3 light_direction = -light_dir;

	const IPoint2 bitmap_size = pBM->GetSize();
	const double ratiox = (double)(m_iSize.x - 1) / (bitmap_size.x - 1),
		ratioy = (double)(m_iSize.y - 1) / (bitmap_size.y - 1);

	// For purposes of shading, we need to look at adjacent heixels which are
	// at least one grid cell away:
	int xOffset = (int)ratiox;
	int yOffset = (int)ratioy;
	if (xOffset < 1)
		xOffset = 1;
	if (yOffset < 1)
		yOffset = 1;

	const int depth = pBM->GetDepth();

	// Center, Left, Right, Top, Bottom
	FPoint3 c, l, r, t, b, v3;

	// iterate over the texels
	for (int j = 0; j < bitmap_size.y; j++)
	{
		if (progress_callback != NULL && (j%40) == 0)
			progress_callback(j * 100 / bitmap_size.y);

		// find corresponding location in terrain
		const int y = (int) (j * ratioy);
		for (int i = 0; i < bitmap_size.x; i++)
		{
			const int x = (int) (i * ratiox);
			GetWorldLocation(x, y, c, bTrue);
			if (c.y == INVALID_ELEVATION)
				continue;

			// Check to see what surrounding values are valid
			GetWorldLocation(x-xOffset, y, l, bTrue);
			GetWorldLocation(x+xOffset, y, r, bTrue);
			GetWorldLocation(x, y+yOffset, t, bTrue);
			GetWorldLocation(x, y-yOffset, b, bTrue);

			// Fall back to the center point for any invalid neighbor.
			FPoint3 p1, p2, p3, p4;
			if (l.y != INVALID_ELEVATION)
				p1 = l;
			else
				p1 = c;
			if (r.y != INVALID_ELEVATION)
				p2 = r;
			else
				p2 = c;
			if (t.y != INVALID_ELEVATION)
				p3 = t;
			else
				p3 = c;
			if (b.y != INVALID_ELEVATION)
				p4 = b;
			else
				p4 = c;
#if 0
			// The naive way is to use the surface vectors and cross them to
			// find the normal.
			FPoint3 v1 = p2 - p1;
			FPoint3 v2 = p3 - p4;
			// This provides some 'exaggeration' for the lighting
			v1.y *= fLightFactor;
			v2.y *= fLightFactor;
			v3 = v1.Cross(v2);
#else
			// This is equivalent to the cross product, and the overall
			// render is 2% faster
			// NOTE(review): if BOTH horizontal (or both vertical) neighbors
			// are invalid, p1 == p2 (or p3 == p4) and a denominator here is
			// zero — confirm whether inputs guarantee a valid neighbor pair.
			v3.Set((p1.y - p2.y)*fLightFactor/(p2.x - p1.x),
				1,
				(p3.y - p4.y)*fLightFactor/(p4.z - p3.z));
#endif
			v3.Normalize();

			float shade = v3.Dot(light_direction);	// shading 0 (dark) to 1 (light)

			// Most of the values are in the bottom half of the 0-1 range, so push
			// them upwards with a gamma factor.
			if (fGamma != 1.0f)
				shade = powf(shade, fGamma);

			// boost with ambient light
			shade += fAmbient;

			// Never shade below zero, can cause RGB wraparound
			if (shade < 0)
				shade = 0;
			if (shade > 1.1f)
				shade = 1.1f;

			// combine color and shading
			if (depth == 8)
				pBM->ScalePixel8(i, bitmap_size.y-1-j, shade);
			else if (depth == 24)
				pBM->ScalePixel24(i, bitmap_size.y-1-j, shade);
			else if (depth == 32)
				pBM->ScalePixel32(i, bitmap_size.y-1-j, shade);
		}
	}
}
/// Calcuate ellipsoids with same moments of inertia, centroids as grains int Dataset::calculate_ellipses() { int i; int done = 0; //#pragma omp parallel for for(i=0;i<num_grains;i++){ int j,k, index; SymmetricMatrix I2(3); I2 = 0; DiagonalMatrix D(3); double I1[3], A[3], I0, p[3], c[3], ax[3], I2x[3]; class GrainList *list; progress_callback((float)done/num_grains, "Calculating ellipses", false); I0 = grains[i].volume; I1[0] = 0; I1[1] = 0; I1[2] = 0; if(grains[i].volume > 1){ // Calculate 1st moments of inertia //-------------------------------------------------- for(list=grains[i].list_head; list!=NULL; list=list->next){ index = list->index; p[X] = (index % tx) *(double)steps[X]; p[Y] = ((index/dy) % ty)*(double)steps[Y]; p[Z] = ((index/dz) % tz)*(double)steps[Z]; for(j=0;j<3;j++){ I1[j] += p[j]; } } // Calculate centroid //-------------------------------------------------- for(j=0;j<3;j++){ c[j] = I1[j]/I0; } // Calculate 1st, 2nd moments of inertia //-------------------------------------------------- for(list=grains[i].list_head; list!=NULL; list=list->next){ index = list->index; p[X] = (index % tx) *(double)steps[X] - c[X]; p[Y] = ((index/dy) % ty)*(double)steps[Y] - c[Y]; p[Z] = ((index/dz) % tz)*(double)steps[Z] - c[Z]; I2(1,1) += p[1]*p[1] + p[2]*p[2]; // Newmat library uses I2(2,2) += p[0]*p[0] + p[2]*p[2]; // 1-indexed arrays I2(3,3) += p[0]*p[0] + p[1]*p[1]; I2(1,2) -= p[0]*p[1]; I2(1,3) -= p[0]*p[2]; I2(2,3) -= p[1]*p[2]; } for(j=1;j<=3;j++){ I2(j,j) = I2(j,j)/I0; for(k=j+1;k<=3;k++){ I2(j,k) = I2(j,k)/I0; } } // Diagonalise matrix Jacobi(I2,D); // Calcuate axes //-------------------------------------------------- for(j=0;j<3;j++){ A[j] = 15/(8*PI)*( D(j+1,j+1) + D((j+1)%3+1,(j+1)%3+1) - D((j+2)%3+1,(j+2)%3+1) ); } I2x[0] = D(1,1); I2x[1] = D(2,2); I2x[2] = D(3,3); for(j=0;j<3;j++){ ax[j] = sqrt(2.5*(I2x[(j+1)%3]+I2x[(j+2)%3]-I2x[j])); } for(j=0;j<3;j++){ grains[i].centroid[j] = c[j]; grains[i].axis[j] = ax[j]; 
//pow(pow(A[j],4)/(A[(j+1)%3]*A[(j+2)%3]),0.1); } // Sort axes such that a>b>c qsort(grains[i].axis,3,sizeof(float),compare_floats); if (isnan(grains[i].axis[0]) || isnan(grains[i].axis[1]) || isnan(grains[i].axis[2]) ) { //|| //isinf(grains[i].axis[0]) || isinf(grains[i].axis[1]) || isinf(grains[i].axis[2])) { grains[i].axis[0] = 1; grains[i].axis[1] = 1; grains[i].axis[2] = 1; } } #pragma omp atomic done++; } progress_callback(1, "Calculating ellipses", false); return 1; }
/// Calculate average orientation of each grain int Dataset::average_orientations() { int i; //#pragma omp parallel for for(i=0;i<num_grains;i++){ int k; Point *d0, *d1, *d; int volume, valid_count; long double avg[4]; long double q[4], n; if (i%100 == 0) { //#pragma omp critial(display) progress_callback((float)i / num_grains,"Calculating grain averages", false); } volume = grains[i].volume; for(k=0;k<4;k++){ avg[k] = 0; } d0 = data + grains[i].list_head->index; d0->orientation.reduce_zone(); class GrainList *list, *list_next; d = data + grains[i].list_head->index; int qq = 0; valid_count = 0; // Cycle through each voxel in grain and calculate average //-------------------------------------------------- for(list=grains[i].list_head; list!=NULL && valid_count < 100; list=list->next){ list_next = list->next; d0 = data + list->index; if(d0->orientation.is_non_zero()){ for(k=0;k<4;k++){ avg[k] += (long double)d0->orientation.q[k]; assert(!isnan(avg[k])); } valid_count++; } qq++; // Place each voxel in the same cloud as the previous //-------------------------------------------------- if (list->next!=NULL) { d1 = data + list_next->index; d1->orientation.reduce_zone(&(d->orientation)); } } if (valid_count == 0){ grains[i].orientation.set_zero(); } else { // Renormalise average value and reduce to fundamental zone //-------------------------------------------------- n = 0; for(k=0;k<4;k++){ q[k] = avg[k]/valid_count; assert(!isnan(q[k])); n = n + q[k]*q[k]; } n = sqrt(n); for(k=0;k<4;k++){ q[k] = q[k]/n; } grains[i].orientation = q; grains[i].orientation.reduce_zone(); } } progress_callback(1,"Calculating grain averages", false); return 1; }
/** Uses modified marching cubes algorithm */ int Dataset::calculate_surface() { Gridcell grid; int index, in, i,j,k; int num_triangles = 0, new_triangles = 0, *buffer_count; buffer_count = (int*) malloc(sizeof(int)*num_grains); if (buffer_count == NULL) { return 0; } double sa = 0, AB[3], AC[3], ABAC; Triangle *t; bool already_marked; // For each grain allocate initial buffer and set surface count to zero //-------------------------------------------------- for(i=0;i<num_grains;i++){ if (grains[i].surface_triangles != NULL) { free(grains[i].surface_triangles); } grains[i].surface_triangles = (Triangle*) malloc(sizeof(Triangle)*TRIANGLE_BUFFER); if (grains[i].surface_triangles == NULL) { return 0; } grains[i].num_triangles = 0; buffer_count[i] = 1; } XYZ pi; int present[9]; int num_present; present[0] = -1; // Cycle through each 'cube' in dataset and calculate the surface for // each of the grains present in the cube. //-------------------------------------------------- for(index=0;index<nv;index++){ if (index%1000 == 0) { //#pragma omp critial(display) progress_callback((float)index / nv,"Calculating surfaces", false); } pi.x = (index % tx)-1; pi.y = (index/dy % ty)-1; pi.z = (index/dz % tz)-1; num_present = 1; for(j=0;j<8;j++) { grid.p[j].x = pi.x + grid_offset[j][X]; grid.p[j].y = pi.y + grid_offset[j][Y]; grid.p[j].z = pi.z + grid_offset[j][Z]; if (grid.p[j].x<0 || grid.p[j].y<0 || grid.p[j].z<0 || grid.p[j].x>=(tx-2) || grid.p[j].y>=(ty-2) || grid.p[j].z>=(tz-2)) { grid.grain[j] = -1; } else { in = index - dz + grid_offset[j][X] + grid_offset[j][Y]*dy + grid_offset[j][Z]*dz; grid.grain[j] = data[in].grain; // Keep track of which grains are present //-------------------------------------------------- if (data[in].grain >= 0) { already_marked = false; for(k=0;k<num_present;k++){ if (data[in].grain == present[k]) { already_marked = true; } } if (!already_marked) { present[ num_present++ ] = data[in].grain; } } } } for(j=1;j<num_present;j++){ num_triangles = 
grains[ present[j] ].num_triangles; // Make sure there's enough space in the buffer //-------------------------------------------------- if(num_triangles+12 > buffer_count[ present[j] ]*TRIANGLE_BUFFER) { grains[ present[j] ].surface_triangles = (Triangle*) realloc(grains[ present[j] ].surface_triangles, sizeof(Triangle)*(++buffer_count[present[j]])*TRIANGLE_BUFFER); if (grains[ present[j] ].surface_triangles == NULL) { return 0; } } new_triangles = polygonise(grid, present[j], grains[ present[j] ].surface_triangles+num_triangles); grains[ present[j] ].num_triangles += new_triangles; } } for(i=0;i<num_grains;i++){ sa = 0; t = grains[i].surface_triangles; // Calculate surface area of grains //-------------------------------------------------- for(j=0;j<grains[i].num_triangles;j++){ AB[X] = (t->p[0].x - t->p[1].x)*(double)steps[X]; AB[Y] = (t->p[0].y - t->p[1].y)*(double)steps[Y]; AB[Z] = (t->p[0].z - t->p[1].z)*(double)steps[Z]; AC[X] = (t->p[0].x - t->p[2].x)*(double)steps[X]; AC[Y] = (t->p[0].y - t->p[2].y)*(double)steps[Y]; AC[Z] = (t->p[0].z - t->p[2].z)*(double)steps[Z]; ABAC = AB[X]*AC[X]+AB[Y]*AC[Y]+AB[Z]*AC[Z]; sa += 0.5 * sqrt( (AB[X]*AB[X] + AB[Y]*AB[Y] + AB[Z]*AB[Z])* (AC[X]*AC[X] + AC[Y]*AC[Y] + AC[Z]*AC[Z])- ABAC*ABAC ); t++; } grains[i].surface_area = (float) sa; } free(buffer_count); progress_callback(1,"Calculating grain surfaces", false); }
// Resolve client-side visuals for every registered node definition: pick the
// actual tile textures, derive solidness/material type from the drawtype and
// the user's video settings, request shaders, and (optionally) build and cache
// rotated copies of node meshes.  Compiled out entirely on server builds.
// progress_callback is invoked once per node definition with (args, i, size).
void CNodeDefManager::updateTextures(IGameDef *gamedef,
	void (*progress_callback)(void *progress_args, u32 progress, u32 max_progress),
	void *progress_callback_args)
{
#ifndef SERVER
	infostream << "CNodeDefManager::updateTextures(): Updating "
		"textures in node definitions" << std::endl;
	ITextureSource *tsrc = gamedef->tsrc();
	IShaderSource *shdsrc = gamedef->getShaderSource();
	scene::ISceneManager* smgr = gamedef->getSceneManager();
	scene::IMeshManipulator* meshmanip = smgr->getMeshManipulator();

	// Video-related user settings that influence how nodes are rendered.
	bool new_style_water           = g_settings->getBool("new_style_water");
	bool new_style_leaves          = g_settings->getBool("new_style_leaves");
	bool connected_glass           = g_settings->getBool("connected_glass");
	bool opaque_water              = g_settings->getBool("opaque_water");
	bool enable_shaders            = g_settings->getBool("enable_shaders");
	bool enable_bumpmapping        = g_settings->getBool("enable_bumpmapping");
	bool enable_parallax_occlusion = g_settings->getBool("enable_parallax_occlusion");
	bool enable_mesh_cache         = g_settings->getBool("enable_mesh_cache");

	// Normal maps are only needed when a shader effect that consumes them is on.
	bool use_normal_texture = enable_shaders &&
		(enable_bumpmapping || enable_parallax_occlusion);

	u32 size = m_content_features.size();

	for (u32 i = 0; i < size; i++) {
		ContentFeatures *f = &m_content_features[i];

		// Figure out the actual tiles to use
		TileDef tiledef[6];
		for (u32 j = 0; j < 6; j++) {
			tiledef[j] = f->tiledef[j];
			if (tiledef[j].name == "")
				tiledef[j].name = "unknown_node.png";
		}

		bool is_liquid = false;
		bool is_water_surface = false;

		u8 material_type = (f->alpha == 255) ?
			TILE_MATERIAL_BASIC : TILE_MATERIAL_ALPHA;

		// Derive solidness / material / culling from the drawtype; some
		// "*_OPTIONAL" drawtypes are rewritten here based on user settings.
		switch (f->drawtype) {
		default:
		case NDT_NORMAL:
			f->solidness = 2;
			break;
		case NDT_AIRLIKE:
			f->solidness = 0;
			break;
		case NDT_LIQUID:
			assert(f->liquid_type == LIQUID_SOURCE);
			if (opaque_water)
				f->alpha = 255;
			if (new_style_water){
				f->solidness = 0;
			} else {
				f->solidness = 1;
				f->backface_culling = false;
			}
			is_liquid = true;
			break;
		case NDT_FLOWINGLIQUID:
			assert(f->liquid_type == LIQUID_FLOWING);
			f->solidness = 0;
			if (opaque_water)
				f->alpha = 255;
			is_liquid = true;
			break;
		case NDT_GLASSLIKE:
			f->solidness = 0;
			f->visual_solidness = 1;
			break;
		case NDT_GLASSLIKE_FRAMED:
			f->solidness = 0;
			f->visual_solidness = 1;
			break;
		case NDT_GLASSLIKE_FRAMED_OPTIONAL:
			f->solidness = 0;
			f->visual_solidness = 1;
			f->drawtype = connected_glass ? NDT_GLASSLIKE_FRAMED : NDT_GLASSLIKE;
			break;
		case NDT_ALLFACES:
			f->solidness = 0;
			f->visual_solidness = 1;
			break;
		case NDT_ALLFACES_OPTIONAL:
			if (new_style_leaves) {
				f->drawtype = NDT_ALLFACES;
				f->solidness = 0;
				f->visual_solidness = 1;
			} else {
				f->drawtype = NDT_NORMAL;
				f->solidness = 2;
				// NOTE(review): this inner 'i' shadows the outer node index;
				// harmless here since the outer 'i' is not used in the body.
				for (u32 i = 0; i < 6; i++)
					tiledef[i].name += std::string("^[noalpha");
			}
			if (f->waving == 1)
				material_type = TILE_MATERIAL_WAVING_LEAVES;
			break;
		case NDT_PLANTLIKE:
			f->solidness = 0;
			f->backface_culling = false;
			if (f->waving == 1)
				material_type = TILE_MATERIAL_WAVING_PLANTS;
			break;
		case NDT_FIRELIKE:
			f->backface_culling = false;
			f->solidness = 0;
			break;
		case NDT_MESH:
			f->solidness = 0;
			f->backface_culling = false;
			break;
		case NDT_TORCHLIKE:
		case NDT_SIGNLIKE:
		case NDT_FENCELIKE:
		case NDT_RAILLIKE:
		case NDT_NODEBOX:
			f->solidness = 0;
			break;
		}

		if (is_liquid) {
			material_type = (f->alpha == 255) ?
				TILE_MATERIAL_LIQUID_OPAQUE : TILE_MATERIAL_LIQUID_TRANSPARENT;
			if (f->name == "default:water_source")
				is_water_surface = true;
		}

		u32 tile_shader[6];
		for (u16 j = 0; j < 6; j++) {
			tile_shader[j] = shdsrc->getShader("nodes_shader",
				material_type, f->drawtype);
		}

		// The top face of a water source gets its own shader.
		if (is_water_surface) {
			tile_shader[0] = shdsrc->getShader("water_surface_shader",
				material_type, f->drawtype);
		}

		// Tiles (fill in f->tiles[])
		for (u16 j = 0; j < 6; j++) {
			fillTileAttribs(tsrc, &f->tiles[j], &tiledef[j], tile_shader[j],
				use_normal_texture, f->backface_culling, f->alpha, material_type);
		}

		// Special tiles (fill in f->special_tiles[])
		for (u16 j = 0; j < CF_SPECIAL_COUNT; j++) {
			fillTileAttribs(tsrc, &f->special_tiles[j], &f->tiledef_special[j],
				tile_shader[j], use_normal_texture,
				f->tiledef_special[j].backface_culling, f->alpha, material_type);
		}

		if ((f->drawtype == NDT_MESH) && (f->mesh != "")) {
			// Meshnode drawtype
			// Read the mesh and apply scale
			f->mesh_ptr[0] = gamedef->getMesh(f->mesh);
			if (f->mesh_ptr[0]){
				v3f scale = v3f(1.0, 1.0, 1.0) * BS * f->visual_scale;
				scaleMesh(f->mesh_ptr[0], scale);
				recalculateBoundingBox(f->mesh_ptr[0]);
				meshmanip->recalculateNormals(f->mesh_ptr[0], true, false);
			}
		} else if ((f->drawtype == NDT_NODEBOX) &&
				((f->node_box.type == NODEBOX_REGULAR) ||
				(f->node_box.type == NODEBOX_FIXED)) &&
				(!f->node_box.fixed.empty())) {
			//Convert regular nodebox nodes to meshnodes
			//Change the drawtype and apply scale
			f->drawtype = NDT_MESH;
			f->mesh_ptr[0] = convertNodeboxNodeToMesh(f);
			v3f scale = v3f(1.0, 1.0, 1.0) * f->visual_scale;
			scaleMesh(f->mesh_ptr[0], scale);
			recalculateBoundingBox(f->mesh_ptr[0]);
			meshmanip->recalculateNormals(f->mesh_ptr[0], true, false);
		}

		//Cache 6dfacedir and wallmounted rotated clones of meshes
		if (enable_mesh_cache && f->mesh_ptr[0] && (f->param_type_2 == CPT2_FACEDIR)) {
			// One clone per facedir rotation (1..23; slot 0 is the original).
			for (u16 j = 1; j < 24; j++) {
				f->mesh_ptr[j] = cloneMesh(f->mesh_ptr[0]);
				rotateMeshBy6dFacedir(f->mesh_ptr[j], j);
				recalculateBoundingBox(f->mesh_ptr[j]);
				meshmanip->recalculateNormals(f->mesh_ptr[j], true, false);
			}
		} else if (enable_mesh_cache && f->mesh_ptr[0] && (f->param_type_2 == CPT2_WALLMOUNTED)) {
			// Map each of the 6 wallmounted directions onto a 6d facedir value.
			static const u8 wm_to_6d[6] = {20, 0, 16+1, 12+3, 8, 4+2};
			for (u16 j = 1; j < 6; j++) {
				f->mesh_ptr[j] = cloneMesh(f->mesh_ptr[0]);
				rotateMeshBy6dFacedir(f->mesh_ptr[j], wm_to_6d[j]);
				recalculateBoundingBox(f->mesh_ptr[j]);
				meshmanip->recalculateNormals(f->mesh_ptr[j], true, false);
			}
			// Slot 0 is rotated in place (not cloned) for wallmounted.
			rotateMeshBy6dFacedir(f->mesh_ptr[0], wm_to_6d[0]);
			recalculateBoundingBox(f->mesh_ptr[0]);
			meshmanip->recalculateNormals(f->mesh_ptr[0], true, false);
		}

		progress_callback(progress_callback_args, i, size);
	}
#endif
}
/*--------------------------------------------------------------------------- // Function exec_progress_callback: // convenient function to call progress callback function // and set current progress // // In case of single frame volume, set frame to 0 and // total_frames to 1 // ---------------------------------------------------------------------------*/ void exec_progress_callback(int slice, int total_slices, int frame, int total_frames) { if (progress_callback) progress_callback(global_progress_range[0] + (global_progress_range[1]-global_progress_range[0])*(slice+total_slices*frame)/(total_slices*total_frames)); }
//
// Use the QuikGrid library to generate a grid from a set of 3D points.
//
// set              Must actually be a vtFeatureSetPoint3D; returns false otherwise.
// size             Dimensions (columns x rows) of the grid to produce.
// fDistanceRatio   Passed to QuikGrid as its density ratio (x100).
//
// Returns true on success, false if the input is not 3D points, the user
// cancels via progress_callback, or QuikGrid support is compiled out.
//
bool vtElevLayer::CreateFromPoints(vtFeatureSet *set, const IPoint2 &size,
	float fDistanceRatio)
{
#if SUPPORT_QUIKGRID
	vtFeatureSetPoint3D *fsp3 = dynamic_cast<vtFeatureSetPoint3D *>(set);
	if (!fsp3)
		return false;

	DRECT extent;
	fsp3->ComputeExtent(extent);

	// Feed every input point into QuikGrid's scattered-data container.
	int iMaxSize = fsp3->NumEntities();
	ScatData sdata(iMaxSize);
	DPoint3 p;
	for (int i = 0; i < iMaxSize; i++)
	{
		fsp3->GetPoint(i, p);
		sdata.SetNext(p.x, p.y, p.z);
	}

	// Make a SurfaceGrid to hold the results
	DPoint2 spacing(extent.Width() / (size.x-1), extent.Height() / (size.y-1));
	SurfaceGrid Zgrid(size.x, size.y);
	for (int x = 0; x < size.x; x++)
		Zgrid.xset(x, extent.left + spacing.x * x);
	for (int y = 0; y < size.y; y++)
		Zgrid.yset(y, extent.bottom + spacing.y * y);

	// "When any new points will not contributed more than 1/(scan bandwidth cutoff)
	//  towards the value of a grid intersection scanning will cease in that
	//  direction. "
	// NOTE(review): x1..x5 capture QuikGrid's current global settings but are
	// never used nor restored afterwards — the XpandDensityRatio() change below
	// is left in effect globally (also on the user-cancel path).  Confirm
	// whether restoring them was intended.
	int x1 = XpandScanRatio();		// default 16, valid values 1..100
	// "The Distance cutoff specifies a percent of the Density Distance"
	int x2 = XpandDensityRatio();	// default 150, valid values 1..10000
	int x3 = XpandEdgeFactor();		// default 100, valid values 1..10000
	float x4 = XpandUndefinedZ();
	long x5 = XpandSample();

	XpandDensityRatio((int) (fDistanceRatio * 100));

	// Do the expand operation, gradually so we get progress
	XpandInit(Zgrid, sdata);
	int count = 0, total = size.x * size.y;
	while (XpandPoint( Zgrid, sdata))
	{
		if ((count % 100) == 0)
		{
			if (progress_callback(count * 99 / total))
			{
				// user cancelled
				return false;
			}
		}
		count++;
	}

	// copy the result to a ElevationGrid
	m_pGrid = new vtElevationGrid(extent, size, true, set->GetAtProjection());

	for (int x = 0; x < size.x; x++)
		for (int y = 0; y < size.y; y++)
		{
			float value = Zgrid.z(x,y);
			// -99999 is QuikGrid's "undefined" marker; map it to our own.
			if (value == -99999)
				m_pGrid->SetFValue(x, y, INVALID_ELEVATION);
			else
				m_pGrid->SetFValue(x, y, value);
		}
	m_pGrid->ComputeHeightExtents();
	m_pGrid->SetupLocalCS();
	return true;
#else
	// No QuikGrid
	return false;
#endif
}
int old_progress_callback(void* p, double dltotal, double dlnow, double ultotal, double ulnow) { return progress_callback(p, (curl_off_t)dltotal, (curl_off_t)dlnow, (curl_off_t)ultotal, (curl_off_t)ulnow); }
// NSIS plugin entry point: download a URL to a file, showing progress in the
// installer UI.  Stack input (via popstring): optional "/TRANSLATE" + 8
// strings, optional "/TIMEOUT=<ms>", then URL, then destination filename.
// Result is left in installer variable $0: "success", "cancel", or an error
// message.  Uses the JNL_HTTPGet class for the HTTP transfer and the IE proxy
// settings from the registry.
__declspec(dllexport) void download (HWND parent, int string_size, char *variables, stack_t **stacktop)
{
	static char buf[1024];
	static char url[1024];
	static char filename[1024];
	int wasen=0;
	HWND hwndL=0;
	HWND hwndB=0;

	// UI strings; defaults below, overridable via /TRANSLATE.
	static char szDownloading[32];//= "Downloading %s";
	static char szConnecting[32];//= "Connecting ...";
	static char szSecond[32];//= "second";
	static char szMinute[32];//= "minute";
	static char szHour[32];//= "hour";
	static char szPlural[32];//= "s";
	static char szProgress[128];//= "%dkB (%d%%) of %dkB @ %d.%01dkB/s";
	static char szRemaining[128];//= " (%d %s%s remaining)";

	g_parent = parent;

	EXDLL_INIT();
	popstring(url);
	// Optional translated UI strings, pushed before the URL.
	if (!lstrcmpi(url, "/TRANSLATE")) {
		popstring(szDownloading);
		popstring(szConnecting);
		popstring(szSecond);
		popstring(szMinute);
		popstring(szHour);
		popstring(szPlural);
		popstring(szProgress);
		popstring(szRemaining);
		popstring(url);
	}
	else {
		lstrcpy(szDownloading, "Downloading %s");
		lstrcpy(szConnecting, "Connecting ...");
		lstrcpy(szSecond, "second");
		lstrcpy(szMinute, "minute");
		lstrcpy(szHour, "hour");
		lstrcpy(szPlural, "s");
		lstrcpy(szProgress, "%dkB (%d%%) of %dkB @ %d.%01dkB/s");
		lstrcpy(szRemaining, " (%d %s%s remaining)");
	}
	// Optional "/TIMEOUT=<ms>" argument (compared via the first 9 chars).
	lstrcpyn(buf, url, 10);
	if (!lstrcmpi(buf, "/TIMEOUT=")) {
		g_timeout_ms=my_atoi(url+9);
		popstring(url);
	}
	popstring(filename);

	HANDLE hFile = CreateFile(filename,GENERIC_WRITE,FILE_SHARE_READ,NULL,CREATE_ALWAYS,0,NULL);

	if (hFile == INVALID_HANDLE_VALUE) {
		wsprintf (buf, "Unable to open %s", filename);
		setuservariable(INST_0, buf);
	} else {
		if (g_parent) {
			// Embed our progress dialog in the installer page: hide the
			// installer's own list/progress controls, enable Cancel, and
			// subclass the parent so it forwards messages to us.
			g_childwnd=FindWindowEx(g_parent,NULL,"#32770",NULL);
			hwndL=GetDlgItem(g_childwnd,1016);
			hwndB=GetDlgItem(g_childwnd,1027);
			if (hwndL && IsWindowVisible(hwndL)) ShowWindow(hwndL,SW_HIDE);
			else hwndL=NULL;
			if (hwndB && IsWindowVisible(hwndB)) ShowWindow(hwndB,SW_HIDE);
			else hwndB=NULL;
			wasen=EnableWindow(GetDlgItem(g_parent,IDCANCEL),1);
			lpWndProcOld = (void *) GetWindowLong(g_parent,GWL_WNDPROC);
			SetWindowLong(g_parent,GWL_WNDPROC,(long)ParentWndProc);
			g_dialog = CreateDialog((HINSTANCE)hModule, MAKEINTRESOURCE(IDD_DIALOG1), g_childwnd, DownloadDialogProc);
			if (g_dialog) {
				// Position our dialog over the (hidden) list control.
				GetWindowRect(g_dialog,&cr);
				ScreenToClient(g_dialog,(LPPOINT)&cr);
				ScreenToClient(g_dialog,((LPPOINT)&cr)+1);
				GetWindowRect(GetDlgItem(g_childwnd,1016),&r);
				ScreenToClient(g_childwnd,(LPPOINT)&r);
				ScreenToClient(g_childwnd,((LPPOINT)&r)+1);
				SetWindowPos(g_dialog,0,r.left,r.top,r.right-r.left,cr.bottom-cr.top,SWP_NOACTIVATE|SWP_NOZORDER);
				AdjustSize(IDC_STATIC2);
				AdjustSize(IDC_PROGRESS1);
				ShowWindow(g_dialog,SW_SHOWNA);
				// Show only the basename of the destination file.
				char *p=filename;
				while (*p) p++;
				while (*p != '\\' && p != filename) p=CharPrev(filename,p);
				wsprintf(buf,szDownloading, p+1);
				SetDlgItemText(g_childwnd,1006,buf);
				SetDlgItemText (g_dialog, IDC_STATIC2, szConnecting);
			}
		}
		g_hwndProgressBar = GetDlgItem (g_dialog, IDC_PROGRESS1);
		JNL_HTTPGet *get;
		char *error=NULL;
		{
			WSADATA wsaData;
			WSAStartup(MAKEWORD(1, 1), &wsaData);
			// NOTE(review): this 8KB transfer buffer shadows the outer
			// 1KB 'buf' for the rest of the scope (intentional, it seems).
			static char buf[8192]="";
			char *p=NULL;
			HKEY hKey;
			// Read the IE proxy settings from the registry, if enabled.
			if (RegOpenKeyEx(HKEY_CURRENT_USER,"Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings",0,KEY_READ,&hKey) == ERROR_SUCCESS) {
				DWORD l = 4;
				DWORD t;
				DWORD v;
				if (RegQueryValueEx(hKey,"ProxyEnable",NULL,&t,(unsigned char *)&v,&l) == ERROR_SUCCESS && t == REG_DWORD && v) {
					l=8192;
					if (RegQueryValueEx(hKey,"ProxyServer",NULL,&t,(unsigned char *)buf,&l ) == ERROR_SUCCESS && t == REG_SZ) {
						// The value may be a per-protocol list; pick the
						// "http=" entry when present.
						p=strstr(buf,"http=");
						if (!p) p=buf;
						else {
							p+=5;
						}
						char *tp=strstr(p,";");
						if (tp) *tp=0;
						char *p2=strstr(p,"=");
						if (p2) p=0; // we found the wrong proxy
					}
				}
				buf[8192-1]=0;
				RegCloseKey(hKey);
			}
			DWORD start_time=GetTickCount();
			get=new JNL_HTTPGet(JNL_CONNECTION_AUTODNS,16384,(p&&p[0])?p:NULL);
			int st;
			int has_printed_headers = 0;
			// NOTE(review): 'cl' (content length) is read in the st==1 branch
			// below but is only assigned once headers arrive — it can be read
			// uninitialized if the connection completes without headers.
			int cl;
			int len;
			int sofar = 0;
			DWORD last_recv_time=start_time;
			get->addheader ("User-Agent: NSISDL/1.2 (Mozilla)");
			get->addheader ("Accept: */*");
			get->connect (url);
			// Main transfer loop: pump UI messages, poll the HTTP state
			// machine, write received bytes, and update the progress text.
			while (1) {
				if (g_dialog) {
					MSG msg;
					while (PeekMessage(&msg,g_dialog,0,0,PM_REMOVE)) {
						TranslateMessage(&msg);
						DispatchMessage(&msg);
					}
				}
				Sleep(25);
				if (g_cancelled) break;
				st = get->run ();
				if (st == -1) {
					error=get->geterrorstr();
					break;
				} else if (st == 1) {
					// Transfer finished; verify we got everything.
					if (sofar < cl)
						error="download incomplete";
					break;
				} else {
					if (get->get_status () == 0) {
						// Still connecting.
						// progressFunc ("Connecting ...", 0);
						if (last_recv_time+g_timeout_ms < GetTickCount()) {
							error = "Timed out on connecting.";
							break;
						}
					} else if (get->get_status () == 1) {
						progress_callback("Reading headers", 0);
						if (last_recv_time+g_timeout_ms < GetTickCount()) {
							error = "Timed out on getting headers.";
							break;
						}
					} else if (get->get_status () == 2) {
						// Receiving the body.
						if (! has_printed_headers) {
							has_printed_headers = 1;
							last_recv_time=GetTickCount();
							cl = get->content_length ();
							if (cl == 0) {
								error = "Server did not specify content length.";
								break;
							} else if (g_dialog) {
								SendMessage(g_hwndProgressBar, PBM_SETRANGE, 0, MAKELPARAM(0,30000));
								g_file_size=cl;
							}
						}
						while ((len = get->bytes_available ()) > 0) {
							if (len > 8192)
								len = 8192;
							len = get->get_bytes (buf, len);
							if (len > 0) {
								last_recv_time=GetTickCount();
								DWORD dw;
								WriteFile(hFile,buf,len,&dw,NULL);
								sofar += len;
								// Estimate speed and remaining time for the
								// progress text.
								int time_sofar=(GetTickCount()-start_time)/1000;
								int bps=sofar/(time_sofar?time_sofar:1);
								int remain=MulDiv(time_sofar,cl,sofar) - time_sofar;
								char *rtext=szSecond;
								if (remain >= 60) {
									remain/=60;
									rtext=szMinute;
									if (remain >= 60) {
										remain/=60;
										rtext=szHour;
									}
								}
								wsprintf (buf, szProgress, sofar/1024, MulDiv(100,sofar,cl), cl/1024, bps/1024,((bps*10)/1024)%10 );
								if (remain) wsprintf(buf+lstrlen(buf),szRemaining, remain, rtext, remain==1?"":szPlural );
								progress_callback(buf, sofar);
							} else {
								if (sofar < cl)
									error = "Server aborted.";
								break;
							}
						}
						if (GetTickCount() > last_recv_time+g_timeout_ms) {
							error = "Downloading timed out.";
							break;
						}
					} else {
						error = "Bad response status.";
						break;
					}
				}
			}
			WSACleanup();
		}
		CloseHandle(hFile);
		// Restore the installer UI to its pre-download state.
		if (g_parent) {
			if (g_dialog)
				DestroyWindow(g_dialog);
			if (lpWndProcOld)
				SetWindowLong(g_parent,GWL_WNDPROC,(long)lpWndProcOld);
			if (g_childwnd) {
				if (hwndB) ShowWindow(hwndB,SW_SHOWNA);
				if (hwndL) ShowWindow(hwndL,SW_SHOWNA);
			}
			if (wasen) EnableWindow(GetDlgItem(g_parent,IDCANCEL),0);
		}

		// Report the outcome in $0; partial files are deleted.
		if (g_cancelled) {
			setuservariable(INST_0, "cancel");
			DeleteFile(filename);
		} else if (error == NULL) {
			setuservariable(INST_0, "success");
		} else {
			DeleteFile(filename);
			setuservariable(INST_0, error);
		}
		delete get;
	}
}
/** * Loads elevation from a USGS DEM file. * * Some non-standard variations of the DEM format are supported. * * You should call SetupLocalCS() after loading if you will be doing * heightfield operations on this grid. * * \returns \c true if the file was successfully opened and read. */ bool vtElevationGrid::LoadFromDEM(const char *szFileName, bool progress_callback(int), vtElevError *err) { // Free buffers to prepare to receive new data FreeData(); if (progress_callback != NULL) progress_callback(0); FILE *fp = vtFileOpen(szFileName,"rb"); if (!fp) // Cannot Open File { SetError(err, vtElevError::FILE_OPEN, "Couldn't open file '%s'", szFileName); return false; } // check for version of DEM format int iRow, iColumn; char buffer[158]; fseek(fp, 864, 0); if (fread(buffer, 144, 1, fp) != 1) { SetError(err, vtElevError::READ_DATA, "Couldn't read DEM data from '%s'", szFileName); return false; } bool bOldFormat = (strncmp(buffer, " 1 1", 12) == 0); bool bNewFormat = false; bool bFixedLength = true; int iDataStartOffset = 1024; // set here to avoid compiler warning int i, j; if (bOldFormat) iDataStartOffset = 1024; // 1024 is record length else { fseek(fp, 1024, 0); // Check for New Format IConvert(fp, 6, iRow); IConvert(fp, 6, iColumn); if (iRow==1 && iColumn==1) // File OK? 
{ bNewFormat = true; iDataStartOffset = 1024; } else { // might be the Non-fixed-length record format // Record B can start anywhere from 865 to 1023 // Record B is identified by starting with the row/column // of its first profile, " 1 1" fseek(fp, 865, 0); if (fread(buffer, 158, 1, fp) != 1) { SetError(err, vtElevError::READ_DATA, "Couldn't read DEM data from '%s'", szFileName); fclose(fp); return false; } for (i = 0; i < 158-12; i++) { if (!strncmp(buffer+i, " 1 1", 12)) { // Found it bFixedLength = false; iDataStartOffset = 865+i; break; } } if (i == 158-12) { // Not a DEM file SetError(err, vtElevError::READ_DATA, "Couldn't read DEM data from '%s'", szFileName); fclose(fp); return false; } } } // Read the embedded DEM name char szName[41]; fseek(fp, 0, 0); if (fgets(szName, 41, fp) == NULL) return false; int len = strlen(szName); // trim trailing whitespace while (len > 0 && szName[len-1] == ' ') { szName[len-1] = 0; len--; } m_strOriginalDEMName = szName; fseek(fp, 156, 0); int iCoordSystem, iUTMZone; IConvert(fp, 6, iCoordSystem); IConvert(fp, 6, iUTMZone); fseek(fp, 168, 0); double dProjParams[15]; for (i = 0; i < 15; i++) { if (!DConvert(fp, 24, dProjParams[i], DEBUG_DEM)) return false; } int iDatum = EPSG_DATUM_NAD27; // default // OLD format header ends at byte 864 (0x360); new format has Datum if (bNewFormat) { // year of data compilation char szDateBuffer[5]; fseek(fp, 876, 0); // 0x36C if (fread(szDateBuffer, 4, 1, fp) != 1) return false; szDateBuffer[4] = 0; // Horizontal datum // 1=North American Datum 1927 (NAD 27) // 2=World Geodetic System 1972 (WGS 72) // 3=WGS 84 // 4=NAD 83 // 5=Old Hawaii Datum // 6=Puerto Rico Datum fseek(fp, 890, 0); // 0x37A int datum; IConvert(fp, 2, datum); VTLOG("DEM Reader: Read Datum Value %d\n", datum); switch (datum) { case 1: iDatum = EPSG_DATUM_NAD27; break; case 2: iDatum = EPSG_DATUM_WGS72; break; case 3: iDatum = EPSG_DATUM_WGS84; break; case 4: iDatum = EPSG_DATUM_NAD83; break; case 5: iDatum = 
EPSG_DATUM_OLD_HAWAIIAN; break; case 6: iDatum = EPSG_DATUM_PUERTO_RICO; break; } } fseek(fp, 528, 0); int iGUnit, iVUnit; IConvert(fp, 6, iGUnit); IConvert(fp, 6, iVUnit); // Ground (Horizontal) Units in meters double fGMeters; switch (iGUnit) { case 0: fGMeters = 1.0; break; // 0 = radians (never encountered) case 1: fGMeters = 0.3048; break; // 1 = feet case 2: fGMeters = 1.0; break; // 2 = meters case 3: fGMeters = 30.922; break; // 3 = arc-seconds } // Vertical Units in meters double fVertUnits; switch (iVUnit) { case 1: fVertUnits = 0.3048; break; // feet to meter conversion case 2: fVertUnits = 1.0; break; // meters == meters default: fVertUnits = 1.0; break; // anything else, assume meters } fseek(fp, 816, 0); double dxdelta, dydelta, dzdelta; DConvert(fp, 12, dxdelta, DEBUG_DEM); // dxdelta (unused) DConvert(fp, 12, dydelta, DEBUG_DEM); DConvert(fp, 12, dzdelta, DEBUG_DEM); m_bFloatMode = false; // Read the coordinates of the 4 corners VTLOG("DEM corners:\n"); DPoint2 corners[4]; // SW, NW, NE, SE fseek(fp, 546, 0); for (i = 0; i < 4; i++) { DConvert(fp, 24, corners[i].x, DEBUG_DEM); DConvert(fp, 24, corners[i].y, DEBUG_DEM); } for (i = 0; i < 4; i++) VTLOG(" (%lf, %lf)", corners[i].x, corners[i].y); VTLOG("\n"); // Set up the projection and corners bool bGeographic = (iCoordSystem == 0); if (bGeographic) { for (i = 0; i < 4; i++) { // convert arcseconds to degrees m_Corners[i].x = corners[i].x / 3600.0; m_Corners[i].y = corners[i].y / 3600.0; } } else { // some linear coordinate system for (i = 0; i < 4; i++) m_Corners[i] = corners[i]; } // Special case. Some old DEMs claim to be NAD27, but they are of Hawai'i, // and Hawai'i has no NAD27, it is actually OHD. 
if (iDatum == EPSG_DATUM_NAD27) { DRECT Hawaii(0,0,0,0); if (bGeographic) Hawaii.SetRect(-164, 24, -152, 17); else if (iCoordSystem == 1) // UTM { if (iUTMZone == 4) Hawaii.SetRect(240000, 2600000, 1000000, 2000000); else if (iUTMZone == 5) Hawaii.SetRect(-400000, 2600000, 400000, 2000000); } for (i = 0; i < 4; i++) { if (Hawaii.ContainsPoint(m_Corners[i])) iDatum = EPSG_DATUM_OLD_HAWAIIAN; } } bool bSuccessfulCRS = true; switch (iCoordSystem) { case 0: // geographic (lat-lon) iUTMZone = -1; bSuccessfulCRS = m_proj.SetProjectionSimple(false, iUTMZone, iDatum); break; case 1: // utm m_proj.SetProjectionSimple(true, iUTMZone, iDatum); break; case 3: // Albers Conical Equal Area { // The Official DEM documentation says: // "Note: All angles (latitudes, longitudes, or azimuth) are // required in degrees, minutes, and arc seconds in the packed // real number format +DDDOMMOSS.SSSSS." // However, what i've actually seen is values like: // 0.420000000000000D+06' -> 420000 // for 42 degrees, which is off by a decimal point from the DEM docs. // So, intepret the values with factor of 10000 to convert to degrees: // double semi_major = dProjParams[0]; // unused // double eccentricity = dProjParams[1]; // unused double lat_1st_std_parallel = dProjParams[2] / 10000; double lat_2nd_std_parallel = dProjParams[3] / 10000; double lon_central_meridian = dProjParams[4] / 10000; double lat_origin = dProjParams[5] / 10000; double false_easting = dProjParams[6]; double false_northing = dProjParams[7]; m_proj.SetGeogCSFromDatum(iDatum); m_proj.SetACEA(lat_1st_std_parallel, lat_2nd_std_parallel, lat_origin, lon_central_meridian, false_easting, false_northing); } break; case 2: // State Plane (!) 
case 4: // Lambert Conformal case 5: // Mercator case 6: // Polar Stereographic case 7: // Polyconic case 8: // Equidistant Conic Type A / B case 9: // Transverse Mercator case 10: // Stereographic case 11: // Lambert Azimuthal Equal-Area case 12: // Azimuthal Equidistant case 13: // Gnomonic case 14: // Orthographic case 15: // General Vertical Near-Side Perspective case 16: // Sinusoidal (Plate Caree) case 17: // Equirectangular case 18: // Miller Cylindrical case 19: // Van Der Grinten I case 20: // Oblique Mercator VTLOG("Warning! We don't yet support DEM coordinate system %d.\n", iCoordSystem); break; } // We must have a functional CRS, or it will sebsequently fail if (!bSuccessfulCRS) { SetError(err, vtElevError::READ_CRS, "Couldn't determine CRS of DEM file"); return false; } double dElevMin, dElevMax; DConvert(fp, 24, dElevMin, DEBUG_DEM); DConvert(fp, 24, dElevMax, DEBUG_DEM); fseek(fp, 852, 0); int iRows, iProfiles; IConvert(fp, 6, iRows); // This "Rows" value will always be 1 IConvert(fp, 6, iProfiles); VTLOG("DEM profiles: %d\n", iProfiles); m_iSize.x = iProfiles; // values we'll need while scanning the elevation profiles int iProfileRows, iProfileCols; int iElev; double dLocalDatumElev, dProfileMin, dProfileMax; int ygap; double dMinY; DPoint2 start; if (bGeographic) { // If it's in degrees, it's flush square, so we can simply // derive the extents (m_EarthExtents) from the quad corners (m_Corners) ComputeExtentsFromCorners(); dMinY = std::min(corners[0].y, corners[3].y); } else { VTLOG("DEM scanning to compute extents\n"); m_EarthExtents.SetInsideOut(); if (!bFixedLength) fseek(fp, iDataStartOffset, 0); // Need to scan over all the profiles, accumulating the TRUE // extents of the actual data points. 
int record = 0; int data_len; for (i = 0; i < iProfiles; i++) { if (progress_callback != NULL) progress_callback(i*49/iProfiles); if (bFixedLength) fseek(fp, iDataStartOffset + (record * 1024), 0); // We cannot use IConvert here, because there *might* be a spurious LF // after the number - seen in some rare files. if (fscanf(fp, "%d", &iRow) != 1) { SetError(err, vtElevError::READ_DATA, "Error reading DEM at profile %d of %d", i, iProfiles); return false; } IConvert(fp, 6, iColumn); // assert(iColumn == i+1); IConvert(fp, 6, iProfileRows); IConvert(fp, 6, iProfileCols); DConvert(fp, 24, start.x); DConvert(fp, 24, start.y); m_EarthExtents.GrowToContainPoint(start); start.y += ((iProfileRows-1) * dydelta); m_EarthExtents.GrowToContainPoint(start); if (bFixedLength) { record++; data_len = 144 + (iProfileRows * 6); while (data_len > 1020) // max bytes in a record { data_len -= 1020; record++; } } else { DConvert(fp, 24, dLocalDatumElev); DConvert(fp, 24, dProfileMin); DConvert(fp, 24, dProfileMax); for (j = 0; j < iProfileRows; j++) { // We cannot use IConvert here, because there *might* be a spurious LF // after the number - seen in some rare files. 
if (fscanf(fp, "%d", &iElev) != 1) return false; } } } dMinY = m_EarthExtents.bottom; } VTLOG("DEM extents LRTB: %lf, %lf, %lf, %lf\n", m_EarthExtents.left, m_EarthExtents.right, m_EarthExtents.top, m_EarthExtents.bottom); // Compute number of rows double fRows; if (bGeographic) { // degrees fRows = m_EarthExtents.Height() / dydelta * 3600.0f; m_iSize.y = (int)fRows + 1; // 1 more than quad spacing } else { // some linear coordinate system fRows = m_EarthExtents.Height() / dydelta; m_iSize.y = (int)(fRows + 0.5) + 1; // round to the nearest integer } // safety check if (m_iSize.y > 20000) return false; if (!AllocateGrid(err)) return false; // jump to start of actual data fseek(fp, iDataStartOffset, 0); for (i = 0; i < iProfiles; i++) { if (progress_callback != NULL) progress_callback(50+i*49/iProfiles); // We cannot use IConvert here, because there *might* be a spurious LF // after the number - seen in some rare files. if (fscanf(fp, "%d", &iRow) != 1) return false; IConvert(fp, 6, iColumn); //assert(iColumn == i+1); IConvert(fp, 6, iProfileRows); IConvert(fp, 6, iProfileCols); DConvert(fp, 24, start.x); DConvert(fp, 24, start.y); DConvert(fp, 24, dLocalDatumElev); DConvert(fp, 24, dProfileMin); DConvert(fp, 24, dProfileMax); ygap = (int)((start.y - dMinY)/dydelta); for (j = ygap; j < (ygap + iProfileRows); j++) { //assert(j >=0 && j < m_iSize.y); // useful safety check // We cannot use IConvert here, because there *might* be a spurious LF // after the number - seen in some rare files. if (fscanf(fp, "%d", &iElev) != 1) return false; if (iElev == -32767 || iElev == -32768) SetValue(i, j, INVALID_ELEVATION); else { // The DEM spec says: // "A value in this array would be multiplied by the "z" spatial // resolution (data element 15, record type A) and added to the // "Elevation of local datum for the profile" (data element 4, record // type B) to obtain the elevation for the point." 
SetValue(i, j, (short) iElev + (short) dLocalDatumElev); } } } fclose(fp); m_fVMeters = (float) (fVertUnits * dzdelta); ComputeHeightExtents(); if (m_fMinHeight != dElevMin || m_fMaxHeight != dElevMax) VTLOG("DEM Reader: elevation extents in .dem (%.1f, %.1f) don't match data (%.1f, %.1f).\n", dElevMin, dElevMax, m_fMinHeight, m_fMaxHeight); return true; }
/**
 * NSIS plugin entry point: download a URL to a local file, reporting
 * progress into the installer UI.
 *
 * Arguments are popped from the NSIS stack (via PopStringA) in this order:
 *   - optional "/TRANSLATE2" followed by 9 UI strings, or "/TRANSLATE"
 *     followed by 8 UI strings (older format);
 *   - optional "/TIMEOUT=<ms>" (inactivity timeout, default 30000 ms);
 *   - optional "/PROXY" followed by a proxy string (disables IE proxy lookup);
 *   - optional "/NOIEPROXY" (skip reading the IE proxy from the registry);
 *   - the URL;
 *   - the destination filename.
 *
 * On return, a result string is pushed on the NSIS stack: "success",
 * "cancel", or a human-readable error message.  On cancel or failure the
 * partially-written file is deleted.
 *
 * @param parent       Installer window; if non-NULL it is subclassed so a
 *                     progress child window can be shown.
 * @param string_size  \
 * @param variables     } standard NSIS exdll parameters, consumed by EXDLL_INIT()
 * @param stacktop     /
 */
__declspec(dllexport) void download (HWND parent, int string_size, TCHAR *variables, stack_t **stacktop)
{
	char buf[1024];
	char url[1024];
	char filename[1024];
	static char proxy[1024];
	BOOL bSuccess=FALSE;
	int timeout_ms=30000;		// abort if no data received for this long
	int getieproxy=1;			// read proxy settings from the IE registry keys
	int manualproxy=0;			// a /PROXY argument was supplied
	int translation_version;
	const char *error=NULL;		// non-NULL terminates the transfer loop; pushed as result

	// translation version 2 & 1
	static char szDownloading[1024];	// "Downloading %s"
	static char szConnecting[1024];		// "Connecting ..."
	static char szSecond[1024];			// " (1 second remaining)" for v2
										// "second" for v1
	static char szMinute[1024];			// " (1 minute remaining)" for v2
										// "minute" for v1
	static char szHour[1024];			// " (1 hour remaining)" for v2
										// "hour" for v1
	static char szProgress[1024];		// "%skB (%d%%) of %skB at %u.%01ukB/s" for v2
										// "%dkB (%d%%) of %dkB at %d.%01dkB/s" for v1
	// translation version 2 only
	static char szSeconds[1024];		// " (%u seconds remaining)"
	static char szMinutes[1024];		// " (%u minutes remaining)"
	static char szHours[1024];			// " (%u hours remaining)"
	// translation version 1 only
	static char szPlural[1024];			// "s";
	static char szRemaining[1024];		// " (%d %s%s remaining)";

	EXDLL_INIT();

	// Pop the optional translation block first; whatever was popped into
	// `url` that is not a recognized switch falls through as the next arg.
	PopStringA(url);
	if (!lstrcmpiA(url, "/TRANSLATE2")) {
		PopStringA(szDownloading);
		PopStringA(szConnecting);
		PopStringA(szSecond);
		PopStringA(szMinute);
		PopStringA(szHour);
		PopStringA(szSeconds);
		PopStringA(szMinutes);
		PopStringA(szHours);
		PopStringA(szProgress);
		PopStringA(url);
		translation_version=2;
	}
	else if (!lstrcmpiA(url, "/TRANSLATE")) {
		PopStringA(szDownloading);
		PopStringA(szConnecting);
		PopStringA(szSecond);
		PopStringA(szMinute);
		PopStringA(szHour);
		PopStringA(szPlural);
		PopStringA(szProgress);
		PopStringA(szRemaining);
		PopStringA(url);
		translation_version=1;
	}
	else {
		// No translation supplied: use the built-in English strings.
		lstrcpyA(szDownloading, "Downloading %s");
		lstrcpyA(szConnecting, "Connecting ...");
		lstrcpyA(szSecond, " (1 second remaining)");
		lstrcpyA(szMinute, " (1 minute remaining)");
		lstrcpyA(szHour, " (1 hour remaining)");
		lstrcpyA(szSeconds, " (%u seconds remaining)");
		lstrcpyA(szMinutes, " (%u minutes remaining)");
		lstrcpyA(szHours, " (%u hours remaining)");
		lstrcpyA(szProgress, "%skB (%d%%) of %skB at %u.%01ukB/s");
		translation_version=2;
	}

	// Compare only the first 9 characters: lstrcpynA copies 9 chars + NUL.
	lstrcpynA(buf, url, 10);
	if (!lstrcmpiA(buf, "/TIMEOUT=")) {
		timeout_ms=my_atoi(url+9);
		PopStringA(url);
	}
	if (!lstrcmpiA(url, "/PROXY")) {
		getieproxy=0;
		manualproxy=1;
		PopStringA(proxy);
		PopStringA(url);
	}
	if (!lstrcmpiA(url, "/NOIEPROXY")) {
		getieproxy=0;
		PopStringA(url);
	}
	PopStringA(filename);

	HANDLE hFile = CreateFileA(filename,GENERIC_WRITE,FILE_SHARE_READ,NULL,CREATE_ALWAYS,0,NULL);
	if (hFile == INVALID_HANDLE_VALUE)
	{
		wsprintfA(buf, "Unable to open %s", filename);
		error = buf;
	}
	else
	{
		if (parent)
		{
			// Subclass the parent window so ParentWndProc can host/route the
			// progress child dialog; undone when the transfer loop exits.
			uMsgCreate = RegisterWindowMessage(_T("nsisdl create"));
			lpWndProcOld = (WNDPROC)SetWindowLongPtr(parent,GWLP_WNDPROC,(LONG_PTR)ParentWndProc);
			SendMessage(parent, uMsgCreate, TRUE, (LPARAM) parent);

			// set initial text: show only the base name, i.e. everything
			// after the last backslash in `filename`.
			char *p = filename;
			while (*p) p++;
			while (*p !='\\' && p != filename) p = CharPrevA(filename, p);
			wsprintfA(buf, szDownloading, p != filename ? p + 1 : p);
			SetDlgItemTextA(childwnd, 1006, buf);
			SetWindowTextA(g_hwndStatic, szConnecting);
		}
		{
			WSADATA wsaData;
			WSAStartup(MAKEWORD(1, 1), &wsaData);

			JNL_HTTPGet *get = 0;
			static char main_buf[8192];
			char *buf=main_buf;		// NOTE: shadows the outer buf[1024] for the rest of this scope
			char *p=NULL;			// proxy string to use, if any
			HKEY hKey;

			// Pull the IE proxy configuration out of the registry, unless
			// /PROXY or /NOIEPROXY disabled that.
			if (getieproxy && RegOpenKeyExA(HKEY_CURRENT_USER,"Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings",0,KEY_READ,&hKey) == ERROR_SUCCESS)
			{
				DWORD l = 4;
				DWORD t;
				DWORD v;
				if (RegQueryValueExA(hKey,"ProxyEnable",NULL,&t,(unsigned char*)&v,&l) == ERROR_SUCCESS && t == REG_DWORD && v)
				{
					l=8192;
					if (RegQueryValueExA(hKey,"ProxyServer",NULL,&t,(unsigned char *)buf,&l ) == ERROR_SUCCESS && t == REG_SZ)
					{
						// ProxyServer may be a list like "http=host:port;ftp=...";
						// isolate the http entry if present.
						p=strstr(buf,"http=");
						if (!p) p=buf;
						else {
							p+=5;
						}
						char *tp=strstr(p,";");
						if (tp) *tp=0;
						char *p2=strstr(p,"=");
						if (p2) p=0; // we found the wrong proxy
					}
				}
				buf[8192-1]=0;
				RegCloseKey(hKey);
			}
			if (manualproxy == 1)
			{
				p = proxy;
			}

			DWORD start_time=GetTickCount();
			get=new JNL_HTTPGet(JNL_CONNECTION_AUTODNS,16384,(p&&p[0])?p:NULL);
			int st;
			int has_printed_headers = 0;
			__int64 cl = 0;			// content length reported by the server
			int len;
			__int64 sofar = 0;		// bytes written to disk so far
			DWORD last_recv_time=start_time;

			get->addheader ("User-Agent: NSISDL/1.2 (Mozilla)");
			get->addheader ("Accept: */*");
			get->connect (url);

			// Poll loop: runs the HTTP state machine until an error/result
			// string is set, then unsubclasses the parent and breaks out.
			while (1)
			{
				if (g_cancelled) error = "cancel";
				if (error)
				{
					if (parent)
					{
						SendMessage(parent, uMsgCreate, FALSE, (LPARAM) parent);
						SetWindowLongPtr(parent, GWLP_WNDPROC, (LONG_PTR)lpWndProcOld);
					}
					break;
				}

				st = get->run ();
				if (st == -1)	// connection-level error
				{
					lstrcpynA(url, get->geterrorstr(), sizeof(url));
					error = url;
				}
				else if (st == 1)	// connection closed by the server
				{
					if (sofar < cl || get->get_status () != 2)
						error="download incomplete";
					else
					{
						bSuccess=TRUE;
						error = "success";
					}
				}
				else
				{
					if (get->get_status () == 0)	// still connecting
					{
						// progressFunc ("Connecting ...", 0);
						if (last_recv_time+timeout_ms < GetTickCount())
							error = "Timed out on connecting.";
						else
							Sleep(10); // don't busy-loop while connecting
					}
					else if (get->get_status () == 1)	// reading headers
					{
						progress_callback("Reading headers", 0);
						if (last_recv_time+timeout_ms < GetTickCount())
							error = "Timed out on getting headers.";
						else
							Sleep(10); // don't busy-loop while reading headers
					}
					else if (get->get_status () == 2)	// receiving the body
					{
						if (! has_printed_headers)
						{
							has_printed_headers = 1;
							last_recv_time=GetTickCount();
							cl = get->content_length ();
							if (cl == 0)
								error = "Server did not specify content length.";
							else if (g_hwndProgressBar)
							{
								// Progress bar range is a fixed 0..30000; position
								// is scaled elsewhere from sofar/g_file_size.
								SendMessage(g_hwndProgressBar, PBM_SETRANGE, 0, MAKELPARAM(0, 30000));
								g_file_size = cl;
							}
						}
						int data_downloaded = 0;
						// Drain everything currently buffered, 8 KB at a time.
						while ((len = get->bytes_available ()) > 0)
						{
							data_downloaded++;
							if (len > 8192) len = 8192;
							len = get->get_bytes (buf, len);
							if (len > 0)
							{
								last_recv_time=GetTickCount();
								DWORD dw;
								WriteFile(hFile,buf,len,&dw,NULL);
								sofar += len;

								// Build the "x kB (y%) of z kB at ..." status line.
								int time_sofar=(GetTickCount()-start_time)/1000;
								int bps = (int)(sofar/(time_sofar?time_sofar:1));
								int remain = MulDiv64(time_sofar, cl, sofar) - time_sofar;
								if (translation_version == 2)
								{
									// Scale `remain` into seconds/minutes/hours and
									// pick the matching singular/plural string.
									char *rtext=remain==1?szSecond:szSeconds;;
									if (remain >= 60)
									{
										remain/=60;
										rtext=remain==1?szMinute:szMinutes;
										if (remain >= 60)
										{
											remain/=60;
											rtext=remain==1?szHour:szHours;
										}
									}
									char sofar_str[128];
									char cl_str[128];
									myitoa64(sofar/1024, sofar_str);
									myitoa64(cl/1024, cl_str);
									wsprintfA (buf, szProgress, //%skB (%d%%) of %skB @ %u.%01ukB/s
										sofar_str, MulDiv64(100, sofar, cl), cl_str, bps/1024,((bps*10)/1024)%10 );
									if (remain) wsprintfA(buf+lstrlenA(buf),rtext, remain );
								}
								else if (translation_version == 1)
								{
									char *rtext=szSecond;
									if (remain >= 60)
									{
										remain/=60;
										rtext=szMinute;
										if (remain >= 60)
										{
											remain/=60;
											rtext=szHour;
										}
									}
									wsprintfA (buf, szProgress, //%dkB (%d%%) of %dkB @ %d.%01dkB/s
										int(sofar/1024), MulDiv64(100, sofar, cl), int(cl/1024), bps/1024,((bps*10)/1024)%10 );
									if (remain) wsprintfA(buf+lstrlenA(buf),szRemaining, remain, rtext, remain==1?"":szPlural );
								}
								progress_callback(buf, sofar);
							}
							else
							{
								if (sofar < cl)
									error = "Server aborted.";
							}
						}
						if (GetTickCount() > last_recv_time+timeout_ms)
						{
							if (sofar != cl)
							{
								error = "Downloading timed out.";
							}
							else
							{
								// workaround for bug #1713562
								// buggy servers that wait for the client to close the connection.
								// another solution would be manually stopping when cl == sofar,
								// but then buggy servers that return wrong content-length will fail.
								bSuccess = TRUE;
								error = "success";
							}
						}
						else if (!data_downloaded)
							Sleep(10);
					}
					else
					{
						error = "Bad response status.";
					}
				}
			}

			// Clean up the connection then release winsock
			if (get) delete get;
			WSACleanup();
		}
		CloseHandle(hFile);
	}
	// On cancel or failure, don't leave a partial file behind.
	if (g_cancelled || !bSuccess)
	{
		DeleteFileA(filename);
	}
	PushStringA(error);
}
bool vtDLGFile::Read(const char *fname, bool progress_callback(int)) { char buf[80]; int i, j, iUTMZone; // basic initialization m_iError = 0; m_fname = fname; m_fp = vtFileOpen(fname, "rb"); if (!m_fp) { m_iError = DLG_ERR_FILE; return false; } // check to see if this is a LF-delimited file m_bLFdelimited = false; fseek(m_fp, 56, SEEK_SET); for (i = 0; i < 24; i++) { if (fgetc(m_fp) == 10) m_bLFdelimited = true; } // rewind to beginning fseek(m_fp, 0, SEEK_SET); // record 1 - banner if (!GetRecord(buf)) return false; strncpy(m_header, buf, 72); // record 2 - cell name, date, qualifier, scale, sectional indicator if (!GetRecord(buf)) return false; // record 3 - contour interval info, status flags if (!GetRecord(buf)) return false; // record 4 - codes, resolution, transformation info if (!GetRecord(buf)) return false; int reference_system = geti6(buf + 6); // 1 = UTM, 3 = Albers if (reference_system == 3) // We don't support Albers return false; iUTMZone = geti6(buf + 12); // Datum. Undocumented field! Had to look at the government's // own "dlgv32" source to figure out how to find this value. int iDLGDatum = geti3(buf + 66); // safety check.. 
because they do if ((iDLGDatum < 0) || (iDLGDatum > 4)) iDLGDatum = 0; // this is how they interpret the value int iDatum; switch (iDLGDatum) { case 0: iDatum = EPSG_DATUM_NAD27; break; case 1: iDatum = EPSG_DATUM_NAD83; break; default: iDatum = -1; break; } // record 5-9 - Projection parameters for map transformation for (i = 5; i < 10; i++) if (!GetRecord(buf)) return false; // record 10 - Internal file-to-map projection transformation parameters if (!GetRecord(buf)) return false; // record 11 - SW quadrangle corner if (!GetRecord(buf)) return false; m_SW_lat.y = getd12(buf+6); m_SW_lat.x = getd12(buf+18); m_SW_utm.x = getd12(buf+36); m_SW_utm.y = getd12(buf+48); // record 12 - NW quadrangle corner if (!GetRecord(buf)) return false; m_NW_lat.y = getd12(buf+6); m_NW_lat.x = getd12(buf+18); m_NW_utm.x = getd12(buf+36); m_NW_utm.y = getd12(buf+48); // record 13 - NE quadrangle corner if (!GetRecord(buf)) return false; m_NE_lat.y = getd12(buf+6); m_NE_lat.x = getd12(buf+18); m_NE_utm.x = getd12(buf+36); m_NE_utm.y = getd12(buf+48); // record 14 - SE quadrangle corner if (!GetRecord(buf)) return false; m_SE_lat.y = getd12(buf+6); m_SE_lat.x = getd12(buf+18); m_SE_utm.x = getd12(buf+36); m_SE_utm.y = getd12(buf+48); // Special exception: DLG for Hawai`i that says "NAD27" is actually in // Old Hawaiian Datum (OHD) - so check for it. if ((iUTMZone == 4 || iUTMZone == 5) && m_SW_utm.y < 3500000) { if (iDatum == EPSG_DATUM_NAD27) iDatum = EPSG_DATUM_OLD_HAWAIIAN; } // We now know enough to set the projection. m_proj.SetProjectionSimple(true, iUTMZone, iDatum); // record 15 - category name, attribute format code, number of nodes... 
if (!GetRecord(buf)) return false; m_iNodes = geti6(buf + 24); m_iAreas = geti6(buf + 40); m_iLines = geti6(buf + 56); // allocate storage space m_nodes.resize(m_iNodes); m_areas.resize(m_iAreas); m_lines.resize(m_iLines); int total = m_iNodes + m_iAreas + m_iLines, elem = 0; // now read the nodes for (i = 0; i < m_iNodes; i++) { if (!GetRecord(buf)) return false; // do some safety checking if (buf[0] != 'N') break; // make sure node starts with a N int id = geti6(buf + 1); if (id != i+1) break; // got the right node number? m_nodes[i].m_p.x = getd12(buf+6); m_nodes[i].m_p.y = getd12(buf+18); m_nodes[i].m_iAttribs = geti6(buf + 48); int elements = geti6(buf + 36); int extra_records = ((elements*6) + 71) / 72 + (m_nodes[i].m_iAttribs>0); // linkage records for (int e = 0; e < extra_records; e++) if (!GetRecord(buf)) return false; if (progress_callback && (++elem % 20) == 0) progress_callback(elem * 100 / total); } // now read the areas for (i = 0; i < m_iAreas; i++) { if (!GetRecord(buf)) return false; // do some safety checking if (buf[0] != 'A') break; // make sure area starts with a A int id = geti6(buf + 1); if (id != i+1) break; // got the right area number? m_areas[i].m_p.x = getd12(buf+6); m_areas[i].m_p.y = getd12(buf+18); m_areas[i].m_iAttribs = geti6(buf + 48); int elements = geti6(buf + 36); int extra_records = ((elements*6) + 71) / 72 + (m_areas[i].m_iAttribs>0); // linkage records for (int e = 0; e < extra_records; e++) if (!GetRecord(buf)) return false; if (progress_callback && (++elem % 20) == 0) progress_callback(elem * 100 / total); } // now read the lines for (i = 0; i < m_iLines; i++) { if (!GetRecord(buf)) return false; // do some safety checking if (buf[0] != 'L') break; // make sure line starts with a L int id = geti6(buf + 1); if (id != i+1) break; // got the right area number? 
m_lines[i].m_iNode1 = geti6(buf+6); m_lines[i].m_iNode2 = geti6(buf+12); m_lines[i].m_iLeftArea = geti6(buf+18); m_lines[i].m_iRightArea = geti6(buf+24); m_lines[i].m_iCoords = geti6(buf + 42); m_lines[i].m_iAttribs = geti6(buf + 48); // coordinate records m_lines[i].m_p.SetSize(m_lines[i].m_iCoords); int offset = 0; double x, y; for (int c = 0; c < m_lines[i].m_iCoords; c++) { if (c%3 == 0) { if (!GetRecord(buf)) return false; offset = 0; } x = getd12(buf+offset); offset += 12; y = getd12(buf+offset); offset += 12; m_lines[i].m_p[c].x = x; m_lines[i].m_p[c].y = y; } // attribute records if (m_lines[i].m_iAttribs) { m_lines[i].m_attr.resize(m_lines[i].m_iAttribs); for (j = 0; j < m_lines[i].m_iAttribs; j++) { if (j%6 == 0) { if (!GetRecord(buf)) return false; offset = 0; } m_lines[i].m_attr[j].m_iMajorAttr = geti6(buf+offset); offset += 6; m_lines[i].m_attr[j].m_iMinorAttr = geti6(buf+offset); offset += 6; } } if (progress_callback && (++elem % 20) == 0) progress_callback(elem * 100 / total); } // all done, close up fclose(m_fp); m_fp = NULL; return true; }