/*
 * orientlock_read - syscall: attempt to take an orientation-based read
 * lock for the given orientation range.
 *
 * Takes system_call_lock while probing read_check(orient); if the probe
 * fails, drops the lock and sleeps on read_wait_queue until
 * read_check(orient) becomes true (or a signal arrives).
 *
 * NOTE(review): when the initial probe fails, got_lock is still 0 after
 * the wait, so the caller sees failure even though the wake-up condition
 * just became true -- confirm whether callers are expected to retry.
 * NOTE(review): wait_event_interruptible() re-evaluates read_check()
 * without holding system_call_lock -- verify that race is acceptable.
 */
SYSCALL_DEFINE1(orientlock_read, struct orientation_range *, orient)
{
	int got_lock = 0;

	spin_lock(&system_call_lock);
	got_lock = read_check(orient);
	if (!got_lock) {
		spin_unlock(&system_call_lock);
		/* sleep until a writer/updater signals read_wait_queue */
		wait_event_interruptible(read_wait_queue, read_check(orient));
	} else
		spin_unlock(&system_call_lock);
	return got_lock;
}
/*
 * Test driver: run each self-check suite in sequence.
 * NOTE(review): return values of the *_check() calls are ignored here;
 * presumably each check reports its own results -- confirm.
 */
int main(){
	ll_check();
	stack_check();
	queue_check();
	read_check();
	path_check1();
	path_check2();
	return 0;
}
/*
 * Accumulate read_check() results over every test-memory index and
 * erase the memory when the total reaches TESTMEMLEN*8.
 * Returns the accumulated count.
 * NOTE: loop index is `byte` -- assumes TESTMEMLEN fits in that type.
 */
unsigned int check_memory()
{
	int total = 0;
	byte slot = 0;

	while (slot < TESTMEMLEN) {
		total += read_check(slot);
		slot++;
	}
	if (total == TESTMEMLEN*8)
		erase_memory();
	return total;
}
/*
 * Receive into the buffer prepared from argv, retrying for as long as
 * read_check() requests another attempt. Returns the buffer VALUE.
 */
static VALUE my_recv(int io_wait, int argc, VALUE *argv, VALUE io)
{
	struct io_args a;
	long nr;

	prepare_read(&a, argc, argv, io);
	kgio_autopush_recv(io);
	if (a.len > 0) {
		do {
			nr = (long)recv(a.fd, a.ptr, a.len, MSG_DONTWAIT);
		} while (read_check(&a, nr, "recv", io_wait) != 0);
	}
	return a.buf;
}
/*
 * read(2) into the buffer prepared from argv (fd switched to
 * non-blocking first), retrying for as long as read_check() requests
 * another attempt. Returns the buffer VALUE.
 */
static VALUE my_read(int io_wait, int argc, VALUE *argv, VALUE io)
{
	struct io_args a;
	long nr;

	prepare_read(&a, argc, argv, io);
	if (a.len > 0) {
		set_nonblocking(a.fd);
		do {
			nr = (long)read(a.fd, a.ptr, a.len);
		} while (read_check(&a, nr, "read", io_wait) != 0);
	}
	return a.buf;
}
/*
 * elf_getnotes - read the ELF note section of a checkpoint file,
 * demarshal it into per-thread state arrays, and load that register
 * state into the given lwp.
 *
 * lp:     thread to restore state into
 * fp:     checkpoint file positioned at the notes section
 * notesz: total size in bytes of the notes section
 *
 * Returns 0 on success, EINVAL on a bogus derived thread count, or the
 * error from read_check()/elf_demarshalnotes()/elf_loadnotes().
 */
static int elf_getnotes(struct lwp *lp, struct file *fp, size_t notesz)
{
	int error;
	int nthreads;
	char *note;
	prpsinfo_t *psinfo;
	prstatus_t *status;
	prfpregset_t *fpregset;
	prsavetls_t *tls;

	//FIXME: find a valid way to retrieve numthreads on restore
	/*
	 * NOTE(review): the constants 20 and 60 presumably account for
	 * per-note header/name overhead -- verify against the writer side.
	 */
	nthreads = (notesz - sizeof(prpsinfo_t) - 20)/(sizeof(prstatus_t) +
	    sizeof(prfpregset_t) + sizeof(prsavetls_t) + 60);
	PRINTF(("reading notes header nthreads=%d\n", nthreads));
	/* reject nonsensical thread counts before sizing the allocations */
	if (nthreads <= 0 || nthreads > CKPT_MAXTHREADS)
		return EINVAL;
	psinfo = kmalloc(sizeof(prpsinfo_t), M_TEMP, M_ZERO | M_WAITOK);
	status = kmalloc(nthreads*sizeof(prstatus_t), M_TEMP, M_WAITOK);
	fpregset = kmalloc(nthreads*sizeof(prfpregset_t), M_TEMP, M_WAITOK);
	tls = kmalloc(nthreads*sizeof(prsavetls_t), M_TEMP, M_WAITOK);
	note = kmalloc(notesz, M_TEMP, M_WAITOK);
	PRINTF(("reading notes section\n"));
	if ((error = read_check(fp, note, notesz)) != 0)
		goto done;
	error = elf_demarshalnotes(note, psinfo, status, fpregset, tls, nthreads);
	if (error)
		goto done;
	/* fetch register state from notes */
	error = elf_loadnotes(lp, psinfo, status, fpregset, tls);
done:
	/* single cleanup path frees everything that was allocated */
	if (psinfo)
		kfree(psinfo, M_TEMP);
	if (status)
		kfree(status, M_TEMP);
	if (fpregset)
		kfree(fpregset, M_TEMP);
	if (tls)
		kfree(tls, M_TEMP);
	if (note)
		kfree(note, M_TEMP);
	return error;
}
/*
 * Read and check a single segment of the large test file.
 * buffer:        the buffer from which to read
 * file_map:      mapping containing the expected bytes
 * reading_small: nonzero to request SMALL_SEGMENT bytes, zero for
 *                LARGE_SEGMENT bytes
 * bytes_read:    number of bytes already consumed
 * Returns the number of bytes read (never 0 on success), or 0 on
 * failure.
 */
static size_t test_segment(file_buffer_t *buffer, unsigned char *file_map,
		int reading_small, size_t bytes_read)
{
	size_t remaining = LARGE_SIZE - bytes_read;
	size_t seg = reading_small ? SMALL_SEGMENT : LARGE_SEGMENT;
	size_t want = (remaining < seg) ? remaining : seg;

	debug_assert(remaining > 0);
	debug_assert(want > 0);

	if (!read_check(buffer, file_map, want, seg))
		return 0;
	return want;
}
/*
 * Peek at pending socket data into the buffer prepared from argv,
 * retrying for as long as read_check() requests another attempt.
 * Returns the buffer VALUE.
 */
static VALUE my_peek(int io_wait, int argc, VALUE *argv, VALUE io)
{
	struct io_args a;
	long nr;

	prepare_read(&a, argc, argv, io);
	kgio_autopush_recv(io);
	if (a.len > 0) {
		/* plain MSG_PEEK (no DONTWAIT bit): rely on O_NONBLOCK */
		if (peek_flags == MSG_PEEK)
			set_nonblocking(a.fd);
		do {
			nr = (long)recv(a.fd, a.ptr, a.len, peek_flags);
		} while (read_check(&a, nr, "recv(MSG_PEEK)", io_wait) != 0);
	}
	return a.buf;
}
/*
 * elf_gethdr - read and validate the ELF file header of a checkpoint
 * file.
 *
 * fp:   file positioned at the start of the ELF image
 * ehdr: out-parameter receiving the header
 *
 * Returns 0 on success, EINVAL if the header sizes or identification
 * bytes do not match this kernel's expectations, or the error from
 * read_check().
 *
 * NOTE(review): "%zd" is used to print sizeof() values (size_t);
 * "%zu" would be the matching conversion -- debug-only, but worth
 * fixing.
 */
static int elf_gethdr(struct file *fp, Elf_Ehdr *ehdr)
{
	size_t nbyte = sizeof(Elf_Ehdr);
	int error;

	if ((error = read_check(fp, ehdr, nbyte)) != 0)
		goto done;
	/* header must describe the same Ehdr/Phdr layout we compiled with */
	if (!(ehdr->e_ehsize == sizeof(Elf_Ehdr))) {
		PRINTF(("wrong elf header size: %d\n"
		    "expected size : %zd\n",
		    ehdr->e_ehsize, sizeof(Elf_Ehdr)));
		return EINVAL;
	}
	if (!(ehdr->e_phentsize == sizeof(Elf_Phdr))) {
		PRINTF(("wrong program header size: %d\n"
		    "expected size : %zd\n",
		    ehdr->e_phentsize, sizeof(Elf_Phdr)));
		return EINVAL;
	}
	/* magic, class, endianness, version and ABI must all match */
	if (!(ehdr->e_ident[EI_MAG0] == ELFMAG0 &&
	    ehdr->e_ident[EI_MAG1] == ELFMAG1 &&
	    ehdr->e_ident[EI_MAG2] == ELFMAG2 &&
	    ehdr->e_ident[EI_MAG3] == ELFMAG3 &&
	    ehdr->e_ident[EI_CLASS] == ELF_CLASS &&
	    ehdr->e_ident[EI_DATA] == ELF_DATA &&
	    ehdr->e_ident[EI_VERSION] == EV_CURRENT &&
	    ehdr->e_ident[EI_OSABI] == ELFOSABI_NONE &&
	    ehdr->e_ident[EI_ABIVERSION] == 0)) {
		PRINTF(("bad elf header\n there are %d segments\n",
		    ehdr->e_phnum));
		return EINVAL;
	}
	PRINTF(("Elf header size: %d\n", ehdr->e_ehsize));
	PRINTF(("Program header size: %d\n", ehdr->e_phentsize));
	PRINTF(("Number of Program headers: %d\n", ehdr->e_phnum));
done:
	return error;
}
/*
 * elf_getphdrs - read nbyte bytes of program headers from fp into phdr
 * and dump each entry via PRINTF. Returns 0 on success or the error
 * from read_check().
 */
static int elf_getphdrs(struct file *fp, Elf_Phdr *phdr, size_t nbyte)
{
	int error;
	int idx;
	int count = nbyte/sizeof(Elf_Phdr);

	PRINTF(("reading phdrs section\n"));
	error = read_check(fp, phdr, nbyte);
	if (error != 0)
		goto done;
	PRINTF(("headers section:\n"));
	for (idx = 0; idx < count; idx++) {
		PRINTF(("entry type: %d\n", phdr[idx].p_type));
		PRINTF(("file offset: %jd\n", (intmax_t)phdr[idx].p_offset));
		PRINTF(("virt address: %p\n", (uint32_t *)phdr[idx].p_vaddr));
		PRINTF(("file size: %jd\n", (intmax_t)phdr[idx].p_filesz));
		PRINTF(("memory size: %jd\n", (intmax_t)phdr[idx].p_memsz));
		PRINTF(("\n"));
	}
done:
	return error;
}
/* Exercise the minimal case: a single one-byte read checked against
 * the expected mapping. */
static int smaller_read_tester(file_buffer_t *buffer, unsigned char *file_map)
{
	const size_t one_byte = 1;

	return read_check(buffer, file_map, one_byte, one_byte);
}
/*
 * R_LoadX42Swapped - load a version-5 x42 model from an in-memory file
 * image whose byte order is opposite the host's, byte-swapping every
 * field as it is read.
 *
 * Allocates the x42data_t on the hunk, then reads each section in file
 * order: pack header, bones, anim groups (also deriving per-bone group
 * indices), packed pos/scale/rot float streams, key stream, animations,
 * tags, influences, lods, groups, per-vertex influence weights (last
 * weight reconstructed as 1 - sum of the others), normals, tangents,
 * texture coords, colors, indices and the name blob. Sections absent
 * from the persisted set are skipped with checked_skip.
 *
 * The local checked_read/checked_align/checked_read_a/checked_skip
 * macros bounds-check every access against `filesize` and return false
 * (via the local read_check() macro) on truncated input. On success the
 * data is published into `mod` and true is returned.
 *
 * NOTE(review): `header_size` is not referenced in this function --
 * presumably consumed by the caller; confirm.
 */
static bool R_LoadX42Swapped( model_t *mod, void *buffer, size_t header_size, int filesize, const char *name, const x42header_t *h ) { uint i; bool stat; x42PackHeader_v5_t pack; size_t inPos; const byte *inBuf; x42data_t *ret; const uint persist_flags = X42_PERSIST_EVERYTHING; ret = (x42data_t*)ri.Hunk_Alloc( sizeof( x42data_t ), h_low ); x42_SetupBufferPointers( ret, h, persist_flags ); inPos = sizeof( x42Header_ident_t ); switch( h->ident.version ) { case X42_VER_V5: inPos += sizeof( x42Header_v5_t ); break; default: ri.Printf( PRINT_ERROR, "Invalid x42 file version in model '%s'\n", name ); return false; } inBuf = (const byte*)buffer + inPos; #define read_check() if( !stat ) return false; else (void)0 #define checked_read( buf, type, count ) \ if( !const_cond_false ) \ { \ uint _i; \ size_t cb = sizeof( type ) * (count); \ \ stat = inPos + cb <= filesize; \ read_check(); \ \ Com_Memcpy( buf, inBuf, cb ); \ inBuf += cb; \ inPos += cb; \ \ for( _i = 0; _i < (count); _i++ ) \ swap_##type( buf + _i ); \ } \ else \ (void)0 #define checked_align( a ) \ if( !const_cond_false ) \ { \ size_t _a = (a); \ \ if( _a ) \ { \ size_t ofs = ((_a - (inPos % _a)) % _a); \ \ stat = inPos + ofs <= filesize; \ read_check(); \ \ inBuf += ofs; \ inPos += ofs; \ } \ } \ else \ (void)0 #define checked_read_a( buf, type, count, a ) \ if( !const_cond_false ) \ { \ checked_align( a ); \ checked_read( buf, type, count ); \ } \ else \ (void)0 #define checked_skip( size, a ) \ if( !const_cond_false ) \ { \ size_t cb = (size); \ \ checked_align( a ); \ stat = inPos + cb <= filesize; \ read_check(); \ \ inBuf += cb; \ inPos += cb; \ } \ else \ (void)0 checked_read( &pack, x42PackHeader_v5_t, 1 ); checked_read_a( ret->bones, x42Bone_v5_t, h->numBones, 8 ); checked_read_a( ret->animGroups, x42AnimGroup_v5_t, h->numAnimGroups, 8 ); for( i = 0; i < h->numAnimGroups; i++ ) { uint j; for( j = ret->animGroups[i].beginBone; j < ret->animGroups[i].endBone; j++ ) ret->boneGroups[j] = (u8)i; } 
checked_align( 2 ); stat = read_packed_floats_swp( &inBuf, filesize, &inPos, pack.animPosPack, 3, (float*)ret->posValues, h->numPosValues, sizeof( vec3_t ) ); read_check(); if( h->modelFlags & X42_MF_UNIFORM_SCALE ) { stat = read_packed_floats_swp( &inBuf, filesize, &inPos, &pack.animScalePack, 1, (float*)ret->scaleValues, h->numScaleValues, sizeof( vec3_t ) ); read_check(); for( i = 0; i < h->numScaleValues; i++ ) { ret->scaleValues[i][1] = ret->scaleValues[i][0]; ret->scaleValues[i][2] = ret->scaleValues[i][0]; } } else { vec2_t bdxyz[3]; bdxyz[0][0] = bdxyz[1][0] = bdxyz[2][0] = pack.animScalePack[0]; bdxyz[0][1] = bdxyz[1][1] = bdxyz[2][1] = pack.animScalePack[1]; stat = read_packed_floats_swp( &inBuf, filesize, &inPos, bdxyz, 3, (float*)ret->scaleValues, h->numScaleValues, sizeof( vec3_t ) ); read_check(); } stat = read_packed_floats_s16n_swp( &inBuf, filesize, &inPos, 4, (float*)ret->rotValues, h->numRotValues, sizeof( quat_t ) ); read_check(); checked_read_a( ret->keyStream, x42KeyStreamEntry_v5_t, h->keyStreamLength, 8 ); checked_read_a( ret->animations, x42Animation_v5_t, h->numAnims, 8 ); checked_read_a( ret->tags, x42Tag_v5_t, h->numTags, 8 ); checked_read_a( ret->influences, x42Influence_v5_t, h->numInfluences, 8 ); checked_read_a( ret->lods, x42LodRange_v5_t, h->numLods, 8 ); checked_read_a( ret->groups, x42Group_v5_t, h->numGroups, 8 ); checked_align( 2 ); stat = read_packed_floats_swp( &inBuf, filesize, &inPos, pack.vertPosPack, 3, (float*)&ret->vertPos[0].pos, h->numVerts, sizeof( x42vertAnim_t ) ); read_check(); for( i = 0; i < h->numGroups; i++ ) { uint j; const x42group_t *g = ret->groups + i; size_t cbElem; if( !g->maxVertInfluences ) continue; cbElem = sizeof( byte ) * ((g->maxVertInfluences - 1) * 2 + 1); for( j = 0; j < g->numVerts; j++ ) { uint k; u8 in[7]; float sum; x42vertAnim_t *v; checked_read( in, u8, cbElem ); v = ret->vertPos + g->firstVert + j; sum = 0; for( k = 0; k < g->maxVertInfluences - 1U; k++ ) { v->idx[k] = in[k * 2 + 0]; 
v->wt[k] = X42_FLOAT_U8N_UNPACK( in[k * 2 + 1] ); sum += v->wt[k]; } v->idx[k] = in[k * 2 + 0]; v->wt[k] = 1.0F - sum; } } if( ret->vertNorm ) { stat = read_packed_floats_s8n_swp( &inBuf, filesize, &inPos, 3, (float*)&ret->vertNorm[0].norm, h->numVerts, sizeof( x42vertNormal_t ) ); read_check(); } else if( h->modelFlags & X42_MF_HAS_NORMALS ) checked_skip( sizeof( s8[3] ) * h->numVerts, 0 ); if( ret->vertTan ) { stat = read_packed_floats_s8n_swp( &inBuf, filesize, &inPos, 7, (float*)&ret->vertTan[0].tan, h->numVerts, sizeof( x42vertTangent_t ) ); read_check(); for( i = 0; i < h->numVerts; i++ ) ret->vertTan[i].nfac1 = ret->vertTan[i].nfac0; } else if( h->modelFlags & X42_MF_HAS_TANGENT_BASIS ) checked_skip( sizeof( s8[7] ) * h->numVerts, 0 ); if( ret->vertTc ) { checked_align( 2 ); stat = read_packed_floats_swp( &inBuf, filesize, &inPos, pack.vertTcPack, 2, (float*)ret->vertTc, h->numVerts, sizeof( vec2_t ) ); read_check(); } else if( h->modelFlags & X42_MF_HAS_TEXTURE_COORDINATES ) checked_skip( sizeof( u16[2] ) * h->numVerts, 2 ); if( ret->vertCl ) { checked_read_a( ret->vertCl, rgba_t, h->numVerts, 4 ); } else if( h->modelFlags & X42_MF_HAS_COLORS ) checked_skip( sizeof( rgba_t ) * h->numVerts, 4 ); checked_read_a( ret->indices, x42Index_t, h->numIndices, 4 ); checked_read( (char*)ret->strings, u8, h->nameBlobLen ); #undef checked_skip #undef checked_read_a #undef checked_read #undef checked_align #undef read_check mod->type = MOD_X42; mod->x42 = ret; return true; }
/*
 * read_dat - open the data file named by the first command-line
 * argument and configure the global rendering/simulation parameters.
 *
 * Reads num_atoms plus any file-embedded options via read_check(),
 * then parses options in two passes: pass 0 consumes the options
 * embedded in the data file (arg_vector), pass 1 consumes the real
 * command line (avp), so command-line options override file options.
 * Any malformed or unknown option aborts through moldyn_usage().
 * Finally sizes the atom arrays (unless -autoscale) and resets pix.
 *
 * NOTE(review): strcpy(name, fn) is unbounded -- verify `name` is
 * large enough for any input path.
 * NOTE(review): the option loop advances `arg` twice per iteration and
 * only validates the value pointer once; a trailing option without a
 * value is caught, but the pattern is fragile.
 */
static void read_dat(void) { char **arg, *arg_vector[MAX_ARGS], *option; static int pass = 0; char *fn = NULL; int i; program_name = arg_vector[0] = avp[0]; if (ac <= 1) moldyn_usage(); fn = *++avp; if (*fn == '-') moldyn_usage(); fptr = fopen(fn, "r"); if (fptr == NULL) moldyn_error("can't open file"); strcpy(name, fn); strtok(name, "."); for (i = 0; i < MAX_ARGS; i++) arg_vector[i] = NULL; read_check(fptr, &num_atoms, arg_vector); if (num_atoms > 40) { numbers = False; delta = -1; linewidth = 0.5; } for (pass = 0; pass < 2; pass++) { if (pass == 1) arg = avp; else arg = arg_vector; while (*++arg) { option = *arg++; if (!*arg) { moldyn_usage(); } if (!strcmp(option, "-atoms")) { num_atoms = atoi(*arg); if (num_atoms >= 40000) bonds = False; } else if (!strcmp(option, "-bonds")) { if (!strcmp(*arg, "yes")) { bonds = True; } else if (!strcmp(*arg, "no")) { bonds = False; } else if (!strcmp(*arg, "chain")) { bonds = chain = True; } else { moldyn_usage(); } } else if (!strcmp(option, "-box")) { if (!strcmp(*arg, "yes")) { box = True; } else if (!strcmp(*arg, "no")) { box = False; } else if (sscanf(*arg, "%lg", &size) != 1) { moldyn_usage(); } if (size > 0) { box = True; } } else if (!strcmp(option, "-delta")) { delta = atof(*arg); } else if (!strcmp(option, "-tolerance")) { tolerance = atof(*arg); } else if (!strcmp(option, "-linewidth")) { linewidth = atof(*arg); } else if (!strcmp(option, "-magstep")) { magstep = -atof(*arg); } else if (!strcmp(option, "-numbers")) { if (!strcmp(*arg, "on")) { numbers = True; } else if (!strcmp(*arg, "off")) { numbers = False; } else { moldyn_usage(); } } else if (!strncmp(option, "-radius", 7)) { if (option[7] == '\0') { radius = atof(*arg); } else { int ord; ord = atoi(&option[7]); element_radii[ord - 1] = fabs(atof(*arg)); } } else if (!strcmp(option, "-rot")) { rotation = atof(*arg); } else if (!strcmp(option, "-tilt")) { tilt = atof(*arg); } else if (!strcmp(option, "-step")) { step = atoi(*arg); } else if (!strcmp(option, 
"-povray")) { povray = atoi(*arg); step = abs(povray); } else if (!strcmp(option, "-resolution")) { resolution = atoi(*arg); if (resolution < 256) { resolution = 256; } else if (resolution > 2560) { resolution = 2560; } } else if (!strcmp(option, "-colors")) { if (!strcmp(*arg, "yes")) { colors = True; } else if (!strcmp(*arg, "no")) { colors = False; } else { moldyn_usage(); } } else if (!strncmp(option, "-color", 6)) { int R, G, B, ord; ord = atoi(&option[6]); findRGB(*arg, &R, &G, &B); element_colors[ord - 1][0] = R; element_colors[ord - 1][1] = G; element_colors[ord - 1][2] = B; } else if (!strcmp(option, "-autoscale")) { if (!strcmp(*arg, "yes")) { autoscale = True; } else if (!strcmp(*arg, "no")) { autoscale = False; } else { moldyn_usage(); } } else { moldyn_usage(); } } } if (!autoscale) { if (num_atoms > max_atoms) { max_atoms = num_atoms * 1.2; } else { max_atoms += 5; } allocate_atom_memory(); } pix = 0; }
/*
 * multigest command-line driver.
 *
 * Flags:
 *   -F fmt   output format (default "multigest")
 *   -S sep   separator string passed to multigest_print_hex
 *   -a alg   digest algorithm (default "sha1")
 *   -c file  verify digests listed in `file` instead of generating
 *   -o file  output file name passed to multigest_print_hex
 *   -r       use the builtin substitution stripping $Id$/$NetBSD$ idents
 *   -s subst use a caller-supplied substitution pattern
 *
 * With no file arguments (and no -c), digests standard input; otherwise
 * digests each named file. Exits EXIT_SUCCESS only if every operation
 * succeeded.
 */
int main(int argc, char **argv)
{
	const char *outname;
	const char *format;
	const char *check;
	const char *alg;
	const char *sub;
	const char *sep;
	uint8_t raw[8192];
	char from[128];
	char to[128];
	int ok;
	int i;

	alg = "sha1";
	format = "multigest";
	sep = sub = outname = check = NULL;
	from[0] = to[0] = 0x0;
	while ((i = getopt(argc, argv, "F:S:a:c:o:rs:")) != -1) {
		switch(i) {
		case 'F':
			format = optarg;
			break;
		case 'S':
			sep = optarg;
			break;
		case 'a':
			alg = optarg;
			break;
		case 'c':
			check = optarg;
			break;
		case 'o':
			outname = optarg;
			break;
		case 'r':
			/* canned pattern: strip RCS $Id$/$NetBSD$ keywords */
			getsubst(sub = ",\\$(Id|NetBSD)[^\n]*\\$,\044NetBSD\044", from, sizeof(from), to, sizeof(to));
			break;
		case 's':
			getsubst(sub = optarg, from, sizeof(from), to, sizeof(to));
			break;
		default:
			break;
		}
	}
	ok = 1;
	if (check) {
		/* verification mode: -c takes precedence over file args */
		if (!read_check(check)) {
			ok = 0;
		}
	} else if (optind == argc) {
		/* no files named: digest stdin */
		if (do_input(alg, raw, from, to)) {
			multigest_print_hex(raw, alg, outname, NULL, sub, sep, format);
		} else {
			ok = 0;
		}
	} else {
		for (i = optind ; i < argc ; i++) {
			if (multigest_file(alg, argv[i], raw, from, to) == NULL) {
				ok = 0;
			} else {
				multigest_print_hex(raw, alg, outname, argv[i], sub, sep, format);
			}
		}
	}
	exit((ok) ? EXIT_SUCCESS : EXIT_FAILURE);
}
/*
 * vulkan_buffer: device buffer creation, destruction and host I/O.
 *
 * create_internal: creates a VkBuffer of `size` bytes with transfer/
 * storage/index/indirect usage bits (no vertex bit -- SSBOs are used
 * instead), allocates memory for it (preferring device-local via
 * find_memory_type_index), binds it, fills in buffer_info, and -- when
 * copy_host_data is set, host_ptr is non-null and NO_INITIAL_COPY is
 * not flagged -- seeds the buffer from host memory. Returns false on
 * any failed Vulkan call or failed initial copy.
 *
 * ~vulkan_buffer: destroys the VkBuffer (if any) and clears
 * buffer_info.
 *
 * read(cqueue, size_, offset): convenience overload reading into
 * host_ptr. read(cqueue, dst, size_, offset): reads `size_` bytes
 * (whole buffer when size_ == 0) at `offset` into dst after
 * read_check() validates the range against the buffer size and flags;
 * serialized via GUARD(lock).
 *
 * write(cqueue, size_, offset): forwards to the dst-taking overload
 * with host_ptr (overload body continues past this excerpt's view).
 */
bool vulkan_buffer::create_internal(const bool copy_host_data, const compute_queue& cqueue) { const auto& vulkan_dev = ((const vulkan_device&)cqueue.get_device()).device; // create the buffer const VkBufferCreateInfo buffer_create_info { .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, .pNext = nullptr, .flags = 0, // no sparse backing .size = size, // set all the bits here, might need some better restrictions later on // NOTE: not setting vertex bit here, b/c we're always using SSBOs .usage = (VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT), // TODO: probably want a concurrent option later on .sharingMode = VK_SHARING_MODE_EXCLUSIVE, .queueFamilyIndexCount = 0, .pQueueFamilyIndices = nullptr, }; VK_CALL_RET(vkCreateBuffer(vulkan_dev, &buffer_create_info, nullptr, &buffer), "buffer creation failed", false) // allocate / back it up VkMemoryRequirements mem_req; vkGetBufferMemoryRequirements(vulkan_dev, buffer, &mem_req); const VkMemoryAllocateInfo alloc_info { .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, .pNext = nullptr, .allocationSize = mem_req.size, .memoryTypeIndex = find_memory_type_index(mem_req.memoryTypeBits, true /* prefer device memory */), }; VK_CALL_RET(vkAllocateMemory(vulkan_dev, &alloc_info, nullptr, &mem), "buffer allocation failed", false) VK_CALL_RET(vkBindBufferMemory(vulkan_dev, buffer, mem, 0), "buffer allocation binding failed", false) // update buffer desc info buffer_info.buffer = buffer; buffer_info.offset = 0; buffer_info.range = size; // buffer init from host data pointer if(copy_host_data && host_ptr != nullptr && !has_flag<COMPUTE_MEMORY_FLAG::NO_INITIAL_COPY>(flags)) { if(!write_memory_data(cqueue, host_ptr, size, 0, 0, "failed to initialize buffer with host data (map failed)")) { return false; } } return true; } vulkan_buffer::~vulkan_buffer() { if(buffer != nullptr) { vkDestroyBuffer(((const 
vulkan_device&)dev).device, buffer, nullptr); buffer = nullptr; } buffer_info = { nullptr, 0, 0 }; } void vulkan_buffer::read(const compute_queue& cqueue, const size_t size_, const size_t offset) { read(cqueue, host_ptr, size_, offset); } void vulkan_buffer::read(const compute_queue& cqueue, void* dst, const size_t size_, const size_t offset) { if(buffer == nullptr) return; const size_t read_size = (size_ == 0 ? size : size_); if(!read_check(size, read_size, offset, flags)) return; GUARD(lock); read_memory_data(cqueue, dst, read_size, offset, 0, "failed to read buffer"); } void vulkan_buffer::write(const compute_queue& cqueue, const size_t size_, const size_t offset) { write(cqueue, host_ptr, size_, offset); }
/*!
 * Md_method::run - finite-difference molecular-dynamics driver.
 *
 * Builds the set of +/-0.00025 displacements for every movable atom
 * (z-direction only for a dimer, otherwise every unrestricted
 * dimension), registers them with the Properties manager, and then for
 * each of `nstep` steps: runs the QMC averager, converts paired
 * auxiliary energy differences into forces (with errors propagated in
 * quadrature, and symmetrized for two atoms so Newton's third law
 * holds), advances positions with damped Verlet
 * (x_{t+1} = x_t + (1-damp)(x_t - x_{t-1}) + dt^2 F/m), recenters, and
 * logs positions, velocities, forces and energies. Initial positions
 * may be restored from `readcheckfile` via read_check(), and each step
 * is checkpointed through write_check() when `writecheckfile` is set.
 *
 * NOTE(review): the force loop declares a local `doublevar prop`,
 * shadowing the Properties manager member `prop` used earlier in this
 * function -- legal but confusing; consider renaming the local.
 */
void Md_method::run(Program_options & options, ostream & output) { output.precision(10); string logfile=options.runid+".log"; prop.setLog(logfile, log_label); int natoms=sys->nIons(); Array1 < Array1 <int> > atom_move; Array1 < Array2 <doublevar> > displace; //Even numbered displacements are in + direction, odd are in // - direction if(natoms==2) { //For a dimer, assume they are oriented in the z //direction and only move in that direction. atom_move.Resize(4); displace.Resize(4); int count=0; for(int at=0; at < natoms; at++) { for(int s=0; s< 2; s++) { atom_move(count).Resize(1); displace(count).Resize(1,3); atom_move(count)(0)=at; displace(count)=0; if(s==0) displace(count)(0,2)=0.00025; else displace(count)(0,2)=-0.00025; count++; } } } else { int ndim=0; for(int d=0; d< 3; d++) { if(restrict_dimension(d) ==0) ndim++; } atom_move.Resize(2*ndim*natoms); displace.Resize(2*ndim*natoms); int count=0; for(int at=0; at< natoms; at++) { for(int d=0; d< 3; d++) { if(!restrict_dimension(d) ) { for(int s=0; s< 2; s++) { atom_move(count).Resize(1); displace(count).Resize(1,3); atom_move(count)(0)=at; displace(count)=0; if(s==0) displace(count)(0,d)=0.00025; else displace(count)(0,d)=-0.00025; count++; } } } } } prop.setDisplacement(atom_move, displace); Properties_final_average curravg; string vmcout=options.runid+".embed"; ofstream vmcoutput; if(output) vmcoutput.open(vmcout.c_str()); Array3 <doublevar> ionpos(nstep+2, natoms, 3, 0.0); Array1 <doublevar> temppos(3); for(int s=0; s< 2; s++) { for(int at=0; at< natoms; at++) { sys->getIonPos(at, temppos); for(int d=0; d< 3; d++) { ionpos(s,at,d)=temppos(d); } } } if(readcheckfile != "") { read_check(ionpos); } for(int s=0; s< 2; s++) { Array2 <doublevar> pos(ionpos(s)); recenter(pos, atomic_weights); } for(int step=0; step < nstep; step++) { int currt=step+1; //current time for(int at=0; at< natoms; at++) { for(int d=0; d< 3; d++) { temppos(d)=ionpos(currt, at, d); } sys->setIonPos(at, temppos); } 
qmc_avg->runWithVariables(prop, sys, wfdata, pseudo, vmcoutput); prop.getFinal(curravg); if(output) output << "*****Step " << step << endl; Array2 <doublevar> force(natoms, 3, 0.0); Array2 <doublevar> force_err(natoms, 3, 0.0); for(int f=0; f< atom_move.GetDim(0); f+=2 ) { for(int m=0; m < atom_move(f).GetDim(0); m++) { int at=atom_move(f)(m); for(int d=0; d< 3; d++) { //Take a finite difference between the two forces doublevar prop=fabs(displace(f)(m,d)/curravg.aux_size(f)); doublevar fin_diff=(curravg.aux_diff(f,0)-curravg.aux_diff(f+1,0)) /(2*curravg.aux_size(f)); force(at,d)+= -prop*fin_diff; force_err(at,d)+=prop*(curravg.aux_differr(f,0) +curravg.aux_differr(f+1,0)) /(2*curravg.aux_size(f))/(2*curravg.aux_size(f)); } } } for(int at=0; at< natoms; at++) { for(int d=0; d< 3; d++) force_err(at,d)=sqrt(force_err(at,d)); } //Make sure that Newton's laws are actually followed for //two atoms; we can do this for more, as well. if(natoms==2) { doublevar average=(force(0,2)-force(1,2))/2.0; force(0,2)=average; force(1,2)=-average; } //Verlet algorithm.. 
for(int at=0; at< natoms; at++) { for(int d=0; d< 3; d++) { ionpos(currt+1, at, d)=ionpos(currt, at,d) +(1-damp)*(ionpos(currt, at, d)-ionpos(currt-1, at,d)) +tstep*tstep*force(at,d)/atomic_weights(at); //cout << "pos " << ionpos(currt, at,d) // << " last " << ionpos(currt-1, at,d) // << " weight " << atomic_weights(at) << endl; } } Array2 <doublevar> currpos(ionpos(currt+1)); recenter(currpos, atomic_weights); doublevar kinen=0; int field=output.precision() + 15; for(int at=0; at < natoms; at++) { if(output) output << "position" << at << " " << setw(field) << ionpos(currt+1, at, 0) << setw(field) << ionpos(currt+1, at, 1) << setw(field) << ionpos(currt+1, at, 2) << endl; Array1 <doublevar> velocity(3); for(int d=0; d< 3; d++) { velocity(d)=(ionpos(currt+1, at, d)-ionpos(currt-1, at,d))/(2*tstep); } for(int d=0;d < 3; d++) { kinen+=.5*atomic_weights(at)*velocity(d)*velocity(d); } if(output ) { output << "velocity" << at << " " << setw(field) << velocity(0) << setw(field) << velocity(1) << setw(field) << velocity(2) << endl; output << "force" << at << " " << setw(field) << force(at,0) << setw(field) << force(at, 1) << setw(field) << force(at,2) << endl; output << "force_err" << at << setw(field) << force_err(at, 0) << setw(field) << force_err(at, 1) << setw(field) << force_err(at, 2) << endl; //output << "force" << at << "z " << force(at,2) << " +/- " // << force_err(at,2) << endl; } } if(output ) { output << "kinetic_energy " << kinen << endl; output << "electronic_energy " << curravg.total_energy(0) << " +/- " << sqrt(curravg.total_energyerr(0)) << endl; output << "total_energy " << curravg.total_energy(0)+kinen << " +/- " << sqrt(curravg.total_energyerr(0)) << endl; if(writecheckfile != "") if(output) write_check(ionpos, currt+1); } } if(output) vmcoutput.close(); }
/* Read the whole large test file in one LARGE_SIZE-byte request and
 * verify it against the expected mapping. */
static int full_read_tester(file_buffer_t *buffer, unsigned char *file_map)
{
	const size_t whole_file = LARGE_SIZE;

	return read_check(buffer, file_map, whole_file, whole_file);
}
/* Read SMALL_SIZE bytes using PAGE_SIZE-sized requests and verify them
 * against the expected mapping. */
static int small_read_tester(file_buffer_t *buffer, unsigned char *file_map)
{
	const size_t total = SMALL_SIZE;
	const size_t request = PAGE_SIZE;

	return read_check(buffer, file_map, total, request);
}
/*
 * jumping_read_tester - exercise seeking within the buffered file:
 * jump to EOF (expecting no readable byte), rewind and read a full
 * page, seek relative within the data and read, rewind and re-read the
 * beginning, then seek past the first page and back again -- verifying
 * position via check_location() and data via read_check() against
 * file_map at every step. Returns 1 on success, 0 on failure.
 *
 * NOTE(review): PAGE_BACK_JUMP is negative, so
 * fseek_buffer(..., -PAGE_BACK_JUMP, SEEK_CUR) seeks FORWARD by 3/4 of
 * a page, landing at PAGE_SIZE - PAGE_BACK_JUMP (i.e. past the first
 * page) even though the log messages say "into first page". The seek
 * and the check_location() expectation are mutually consistent, so the
 * test passes either way, but the naming/messages are misleading --
 * confirm the original intent before renaming.
 */
static int jumping_read_tester(file_buffer_t *buffer, unsigned char *file_map) { /* Jump to the end, and don't expect to read any bytes. */ if (fseek_buffer(buffer, 0, SEEK_END)) { printlg(ERROR_LEVEL, "Failed to jump to end of file.\n"); return 0; } if (!check_location(buffer, LARGE_SIZE)) { printlg(ERROR_LEVEL, "Failed to reach end of file.\n"); return 0; } if (fgetc_buffer(buffer) != EOF) { printlg(ERROR_LEVEL, "Did not expect to " "read a byte at the end of the file.\n"); return 0; } /* Read a page, and then read some of the earlier bytes. */ if (!check_rewind(buffer)) { printlg(ERROR_LEVEL, "Failed to rewind from end.\n"); return 0; } debug_assert(LARGE_SIZE > PAGE_SIZE); if (!read_check(buffer, file_map, PAGE_SIZE, PAGE_SIZE)) { printlg(ERROR_LEVEL, "Failed to read first page.\n"); return 0; } #define PAGE_BACK_JUMP (-PAGE_SIZE * 3 / 4) if (fseek_buffer(buffer, -PAGE_BACK_JUMP, SEEK_CUR)) { printlg(ERROR_LEVEL, "Failed to jump into first page.\n"); return 0; } if (!check_location(buffer, PAGE_SIZE - PAGE_BACK_JUMP)) { printlg(ERROR_LEVEL, "Failed to jump into first page.\n"); return 0; } if (!read_check(buffer, file_map, SMALL_SEGMENT, SMALL_SEGMENT)) { printlg(ERROR_LEVEL, "Failed to read inside first page.\n"); return 0; } /* Rewind, and read part of the first page. */ if (!check_rewind(buffer)) { printlg(ERROR_LEVEL, "Failed to rewind from inside page.\n"); return 0; } if (!read_check(buffer, file_map, SMALL_SEGMENT, SMALL_SEGMENT)) { printlg(ERROR_LEVEL, "Failed to read beginning.\n"); return 0; } /* Read after the first page, and read beginning again. 
*/ debug_assert(LARGE_SIZE > LARGE_SEGMENT + SMALL_SEGMENT); if (fseek_buffer(buffer, LARGE_SEGMENT, SEEK_SET)) { printlg(ERROR_LEVEL, "Failed to jump past first page.\n"); return 0; } if (!check_location(buffer, LARGE_SEGMENT)) { printlg(ERROR_LEVEL, "Failed to reach location past first page.\n"); return 0; } if (!read_check(buffer, file_map, SMALL_SEGMENT, SMALL_SEGMENT)) { printlg(ERROR_LEVEL, "Failed to read after first page.\n"); return 0; } if (!check_rewind(buffer)) { printlg(ERROR_LEVEL, "Failed to rewind to first page.\n"); return 0; } if (!read_check(buffer, file_map, SMALL_SEGMENT, SMALL_SEGMENT)) { printlg(ERROR_LEVEL, "Failed to read first page " "after jumping back to it.\n"); return 0; } return 1; }