/**
 * Return a shallow view of rows nrl..nrh of this array.
 * \param nrl lowest row index of the sub-array
 * \param nrh highest row index of the sub-array
 * \return a dvar3_array whose rows share storage with this array;
 *         if this array is unallocated, a copy of *this is returned.
 */
dvar3_array dvar3_array::sub(int nrl, int nrh)
{
  if (!allocated(*this))
  {
    return *this;
  }
  dvar3_array result(nrl, nrh);
  for (int row = nrl; row <= nrh; ++row)
  {
    // Rows alias the originals rather than copying the data.
    result[row].shallow_copy((*this)(row));
  }
  return result;
}
/**
 * Fill the sd-report scale vector for this (unbounded) parameter vector.
 * Each coefficient has unit scale, so 1.0 is written for every element.
 * \param _d  destination derivative-scale vector (const-cast and written)
 * \param x   current parameter values (unused for an unbounded vector)
 * \param _ii running index into d; advanced past this vector's entries
 */
void param_init_vector::sd_vscale(const dvar_vector& _d,
  const dvar_vector& x, const int& _ii)
{
  if (!allocated(*this))
  {
    return;
  }
  int& ii = (int&)_ii;
  dvar_vector& d = (dvar_vector&)_d;
  const int lo = indexmin();
  const int hi = indexmax();
  for (int k = lo; k <= hi; ++k)
  {
    d(ii) = 1.0;
    ++ii;
  }
}
void param_init_bounded_vector::sd_vscale(const dvar_vector& _v, const dvar_vector& x,const int& _ii) { if (allocated(*this)) { int& ii=(int&) _ii; dvar_vector& v=(dvar_vector&) _v; int mmin=indexmin(); int mmax=indexmax(); //double pen=0; for (int i=mmin;i<=mmax;i++) { v(ii)=dfboundp(x(ii),minb,maxb); ii++; } } }
/**
 * Construct argument storage for an ADPVM slave process.
 * \param _num_args    number of argument slots
 * \param _length_args buffer length used for every argument
 */
adpvm_slave_args::adpvm_slave_args(int _num_args, int _length_args)
{
  counter = 1;
  num_args = _num_args;

  // (Re)build the per-argument length vector, one entry per argument.
  if (allocated(length_args))
  {
    length_args.deallocate();
  }
  length_args.allocate(0, num_args - 1);
  length_args = _length_args;

  // argv mimics a C-style argument vector: slot 0 gets a small buffer
  // (program name), the remaining slots start out null, and the array is
  // null-terminated like a real argv.
  argv = new charptr[num_args + 1];
  argv[0] = new char[20];
  for (int slot = 1; slot < num_args; ++slot)
  {
    argv[slot] = NULL;
  }
  argv[num_args] = NULL;
}
void CollectionMap::rollbackCreate() { if (!allocated()) { return; } // If we are rolling back the database creation, then any collections in that database were // created in this transaction. Since we roll back collection creates before dictionary // creates, we would have already rolled back the collection creation, which does close_ns, // which removes the Collection from the map. So this must be empty. verify(_collections.empty()); // Closing the DB before the transaction aborts will allow the abort to do the dbremove for us. shared_ptr<storage::Dictionary> metadb = _metadb; _metadb.reset(); const int r = metadb->close(); if (r != 0) { storage::handle_ydb_error(r); } }
void pvm_number::assign(const dvector& u) { if(ad_comm::pvm_manager) { int nsp=ad_comm::pvm_manager->num_slave_processes; if (u.indexmin() !=0 || u.indexmax() != nsp) { cerr << "Error in pvm_number::assign valid index bounds must be 0 " << ad_comm::pvm_manager->num_slave_processes << endl; ad_exit(1); } if (allocated(v)) v.deallocate(); v.allocate(0,nsp); v=u; d=u(0); } }
// Allocate n nodes into `chain`, serving as many as possible from the
// local free-node cache and fetching the remainder from the shared pool.
// On any exception the partially built chain is returned to the cache
// before rethrowing.
// NOTE(review): this chunk appears truncated — Boost's BOOST_TRY/BOOST_CATCH
// idiom normally ends with BOOST_CATCH_END and a closing brace, which are
// not visible here; confirm against the surrounding file.
void cached_allocation(size_type n, multiallocation_chain &chain) { size_type count = n, allocated(0); BOOST_TRY{ //If don't have any cached node, we have to get a new list of free nodes from the pool while(!m_cached_nodes.empty() && count--){ void *ret = ipcdetail::to_raw_pointer(m_cached_nodes.pop_front()); chain.push_back(ret); ++allocated; } if(allocated != n){ mp_node_pool->allocate_nodes(n - allocated, chain); } } BOOST_CATCH(...){ this->cached_deallocation(chain); BOOST_RETHROW }
/**
 * Pack a dvar_matrix into the PVM send buffer.  The row index bounds are
 * packed first, then each row is packed recursively.  An unallocated
 * matrix is encoded as the empty bounds pair (0, -1).
 * \param _m matrix to pack (const-cast internally, not modified)
 */
void adpvm_pack(const dvar_matrix& _m)
{
  dvar_matrix& m = (dvar_matrix&)_m;
  if (!allocated(m))
  {
    // Empty sentinel: max < min tells the unpacker there is no data.
    int lo = 0;
    int hi = -1;
    pvm_pkint(&lo, 1, 1);
    pvm_pkint(&hi, 1, 1);
    return;
  }
  int lo = m.indexmin();
  int hi = m.indexmax();
  pvm_pkint(&lo, 1, 1);
  pvm_pkint(&hi, 1, 1);
  for (int row = lo; row <= hi; ++row)
  {
    adpvm_pack(m(row));
  }
}
/**
 * Pack a dvector into the PVM send buffer.  The index bounds are packed
 * first, then the data as one contiguous double block.  An unallocated
 * vector is encoded as the empty bounds pair (0, -1).
 * \param _v vector to pack (const-cast internally, not modified)
 */
void adpvm_pack(const dvector& _v)
{
  dvector& v = (dvector&)_v;
  if (!allocated(v))
  {
    // Empty sentinel: max < min tells the unpacker there is no data.
    int lo = 0;
    int hi = -1;
    pvm_pkint(&lo, 1, 1);
    pvm_pkint(&hi, 1, 1);
    return;
  }
  int lo = v.indexmin();
  int hi = v.indexmax();
  pvm_pkint(&lo, 1, 1);
  pvm_pkint(&hi, 1, 1);
  pvm_pkdouble(&(v(lo)), hi - lo + 1, 1);
}
/**
 * Pack an i3_array into the PVM send buffer.  The slice index bounds are
 * packed first, then each slice is packed recursively.  An unallocated
 * array is encoded as the empty bounds pair (0, -1).
 * \param _m array to pack (const-cast internally, not modified)
 */
void adpvm_pack(const i3_array& _m)
{
  i3_array& m = (i3_array&)_m;
  if (!allocated(m))
  {
    // Empty sentinel: max < min tells the unpacker there is no data.
    int lo = 0;
    int hi = -1;
    pvm_pkint(&lo, 1, 1);
    pvm_pkint(&hi, 1, 1);
    return;
  }
  int lo = m.indexmin();
  int hi = m.indexmax();
  pvm_pkint(&lo, 1, 1);
  pvm_pkint(&hi, 1, 1);
  for (int slice = lo; slice <= hi; ++slice)
  {
    adpvm_pack(m(slice));
  }
}
bool CollectionMap::close_ns(const StringData& ns, const bool aborting) { Lock::assertWriteLocked(ns); // No need to initialize first. If the metadb is null at this point, // we simply say that the ns you want to close wasn't open. if (!allocated()) { return false; } // Find and erase the old entry, if it exists. CollectionStringMap::const_iterator it = _collections.find(ns); if (it != _collections.end()) { // TODO: Handle the case where a client tries to close a load they didn't start. shared_ptr<Collection> cl = it->second; _collections.erase(ns); cl->close(aborting); return true; } return false; }
// Open the named collection, serializing concurrent openers via a
// serializable RMW read of the ns row in the metadb (the row lock is the
// mutual exclusion — only one thread can be here per ns at a time).
// Returns the existing Collection if another thread won the race, a newly
// made Collection if the ns row exists, or NULL if the ns is not found.
// NOTE(review): read-only transactions are detoured onto an alternate txn
// stack so the metadb write lock does not outlive this call.
// on input, _initLock is held, so this can be called by only one thread at a time, // also, on input, the CollectionMap must be allocated Collection *CollectionMap::open_ns(const StringData& ns, const bool bulkLoad) { verify(allocated()); BSONObj serialized; BSONObj nsobj = BSON("ns" << ns); storage::Key sKey(nsobj, NULL); DBT ndbt = sKey.dbt(); // If this transaction is read only, then we cannot possible already // hold a lock in the metadb and we certainly don't need to hold one // for the duration of this operation. So we use an alternate txn stack. const bool needAltTxn = !cc().hasTxn() || cc().txn().readOnly(); scoped_ptr<Client::AlternateTransactionStack> altStack(!needAltTxn ? NULL : new Client::AlternateTransactionStack()); scoped_ptr<Client::Transaction> altTxn(!needAltTxn ? NULL : new Client::Transaction(0)); // Pass flags that get us a write lock on the metadb row // for the ns we'd like to open. DB *db = _metadb->db(); const int r = db->getf_set(db, cc().txn().db_txn(), DB_SERIALIZABLE | DB_RMW, &ndbt, getf_serialized, &serialized); if (r == 0) { // We found an entry for this ns and we have the row lock. // First check if someone got the lock before us and already // did the open. Collection *cl = find_ns(ns); if (cl != NULL) { return cl; } // No need to hold the openRWLock during Collection::make(), // the fact that we have the row lock ensures only one thread will // be here for a particular ns at a time. shared_ptr<Collection> details = Collection::make( serialized, bulkLoad ); SimpleRWLock::Exclusive lk(_openRWLock); verify(!_collections[ns]); _collections[ns] = details; return details.get(); } else if (r != DB_NOTFOUND) { storage::handle_ydb_error(r); } return NULL; }
/**
 * Check if getRevocableExecutors function properly filters out PR executors.
 *
 * TODO(skonefal): Does it really work?
 */
TEST(HelperFunctionsTest, getRevocableExecutors) {
  Try<mesos::FixtureResourceUsage> usages = JsonUsage::ReadJson(QOS_FIXTURE);
  if (usages.isError()) {
    LOG(ERROR) << "JsonSource failed: " << usages.error() << std::endl;
  }

  ResourceUsage usage;
  usage.CopyFrom(usages.get().resource_usage(0));

  std::list<ResourceUsage_Executor> revocable =
    ResourceUsageHelper::getRevocableExecutors(usage);
  ASSERT_EQ(3u, revocable.size());

  // Expected only BE executors.
  for (auto executor : revocable) {
    Resources allocated(executor.allocated());
    EXPECT_FALSE(allocated.revocable().empty());
  }
}
void CollectionMap::getNamespaces( list<string>& tofill ) {
    init();
    if (!allocated()) {
        return;
    }

    getNamespacesExtra extra(tofill);
    storage::Cursor c(_metadb->db());
    // Walk the metadb with the callback until the cursor is exhausted.
    int r;
    do {
        r = c.dbc()->c_getf_next(c.dbc(), 0, getNamespacesCallback, &extra);
        if (r == -1) {
            // -1 means the callback stashed an exception for us to rethrow.
            verify(extra.ex != NULL);
            throw *extra.ex;
        }
        if (r != 0 && r != DB_NOTFOUND) {
            storage::handle_ydb_error(r);
        }
    } while (r != DB_NOTFOUND);
}
/**
 * Recursively test whether m or any of its sub-arrays is unallocated.
 * \param m the i4_array to inspect
 * \return 1 if m itself, or any element reached recursively, is
 *         unallocated; otherwise 0
 */
int sub_unallocated(const i4_array& m)
{
  // Check allocation before reading the index bounds: the bounds of an
  // unallocated array are not meaningful (the original read them first).
  if (!allocated(m))
  {
    return 1;
  }
  const int mmax = m.indexmax();
  for (int i = m.indexmin(); i <= mmax; i++)
  {
    if (sub_unallocated(m(i)))
    {
      return 1;
    }
  }
  return 0;
}
void CollectionMap::getNamespaces( list<string>& tofill ) {
    init();
    if (!allocated()) {
        return;
    }

    getNamespacesExtra extra(tofill);
    storage::Cursor c(_metadb->db());
    // Walk the metadb with the callback until the cursor is exhausted.
    int r;
    do {
        r = c.dbc()->c_getf_next(c.dbc(), 0, getNamespacesCallback, &extra);
        if (r == -1) {
            // -1 means the callback saved an exception; rethrow it.
            extra.throwException();
            msgasserted(17322, "got -1 from cursor iteration but didn't save an exception");
        }
        if (r != 0 && r != DB_NOTFOUND) {
            storage::handle_ydb_error(r);
        }
    } while (r != DB_NOTFOUND);
}
// Resize the array to `size` bytes.  Growing beyond the allocated capacity
// copies the existing bytes into a freshly allocated ByteArray and adopts
// its storage; shrinking (or growing within capacity) just rewrites the
// stored size.
// NOTE(review): `d = tmp.d;` suggests `d` is a shared/implicitly-shared
// data block, and the shrink branch's comment says "Detach" although no
// explicit detach call is visible here — presumably the assignment to
// d->size relies on copy-on-write machinery elsewhere; confirm.
void ByteArray::resize(size_t size) { // Trivial case if (size == this->size()) return; // Check if we need to reallocate if (size > allocated()) { // Temporary copy ByteArray tmp(size); std::memcpy(tmp.data(), constData(), std::min(tmp.size(), this->size())); // Let assignment operator handle the rest d = tmp.d; } else { // Detach and set new size d->size = size; } }
/** * Description not yet available. * \param */ void adpvm_unpack(const ivector& _v) { ivector& v = (ivector&) _v; int imin; int imax; pvm_upkint(&imin,1,1); pvm_upkint(&imax,1,1); if (allocated(v)) { if (v.indexmin()!=imin) { cerr << "Error in min index in" " void adpvm_unpack(const dvector& v)" << endl; ad_exit(1); } if (v.indexmax()!=imax) { cerr << "Error in max index in" " void adpvm_unpack(const dvector& v)" << endl; ad_exit(1); } } else { v.allocate(imin,imax); } pvm_upkint(&(v(imin)),imax-imin+1,1); }
/** * Description not yet available. * \param */ void adpvm_unpack(const d5_array & _m) { d5_array& m = (d5_array &) _m; int imin; int imax; pvm_upkint(&imin,1,1); pvm_upkint(&imax,1,1); if (allocated(m)) { if (m.indexmin()!=imin) { cerr << "Error in min index in" " void adpvm_unpack(const dvector& v)" << endl; ad_exit(1); } if (m.indexmax()!=imax) { cerr << "Error in max index in" " void adpvm_unpack(const dvector& v)" << endl; ad_exit(1); } } else { m.allocate(imin,imax); } for (int i=imin;i<=imax;i++) adpvm_unpack(m(i)); }
/**
 * Record the layout of dvar_matrix m: per-row lower/upper column bounds
 * and the address of each row's value array.  Unallocated rows are stored
 * with the empty bounds pair (0, -1) and a null pointer.
 * \param m matrix whose layout is captured
 * \param x unused tag parameter (disambiguates this constructor)
 */
dvar_matrix_position::dvar_matrix_position(const dvar_matrix& m, int x)
  : lb(m.rowmin(), m.rowmax()),
    ub(m.rowmin(), m.rowmax()),
    ptr(m.rowmin(), m.rowmax())
{
  row_min = m.rowmin();
  row_max = m.rowmax();
  for (int row = row_min; row <= row_max; ++row)
  {
    if (!allocated(m(row)))
    {
      lb(row) = 0;
      ub(row) = -1;
      ptr(row) = 0;
      continue;
    }
    lb(row) = m(row).indexmin();
    ub(row) = m(row).indexmax();
    ptr(row) = m(row).get_va();
  }
}
// Build and return a chain of n nodes, serving as many as possible from
// the local free-node cache and splicing the remainder from the shared
// pool.  On any exception the partially built chain is returned to the
// cache before rethrowing.
// NOTE(review): this chunk appears truncated — Boost's BOOST_TRY/BOOST_CATCH
// idiom normally ends with BOOST_CATCH_END and a closing brace, which are
// not visible here; confirm against the surrounding file.
multiallocation_chain cached_allocation(std::size_t n) { multiallocation_chain chain; std::size_t count = n, allocated(0); BOOST_TRY{ //If don't have any cached node, we have to get a new list of free nodes from the pool while(!m_cached_nodes.empty() && count--){ void *ret = detail::get_pointer(m_cached_nodes.front()); m_cached_nodes.pop_front(); chain.push_back(ret); ++allocated; } if(allocated != n){ multiallocation_chain chain2(mp_node_pool->allocate_nodes(n - allocated)); chain.splice_after(chain.last(), chain2, chain2.before_begin(), chain2.last(), n - allocated); } return boost::interprocess::move(chain); } BOOST_CATCH(...){ this->cached_deallocation(boost::interprocess::move(chain)); BOOST_RETHROW }
// mark_block() updates the offset table entries for the interval;
// allocated() is the subclass bookkeeping hook invoked afterwards.
// NOTE(review): the mark-then-allocated call order looks deliberate —
// confirm against the allocated() overrides before reordering.
// The block [blk_start, blk_end) has been allocated; // adjust the block offset table to represent this information; // right-open interval: [blk_start, blk_end) void G1BlockOffsetArray::alloc_block(HeapWord* blk_start, HeapWord* blk_end) { mark_block(blk_start, blk_end); allocated(blk_start, blk_end); }
/* Dump the entire Nial workspace to file f1: after computing the live
   workspace size (trimming a trailing free block) and stamping the
   version string, the allocated regions of mem[] are written out as
   (count, start-address, data) records, skipping free-list blocks; a
   final zero count terminates the image.  On any write error, control
   branches to `fail` (via the testerr macro) and the run exits through
   exit_cover.  NOTE(review): the record layout and the free-list walk
   depend on blksize/fwdlink invariants defined elsewhere — kept
   byte-identical here. */
void wsdump(FILE * f1) { nialptr startaddr, addr, highest, next; nialint cnt; #ifdef DEBUG if (!continueflag) /* don't memchk if storing continue */ memchk(); #endif #ifdef CALLDLL /* This will clear all is_loaded flags for DLL , so that when this workspace is loaded later, the DLL will be reloaded */ DLLclearflags(); #endif /* find the address of the highest free block */ highest = freelisthdr, next = freelisthdr; while (next != TERMINATOR) { if (next > highest) highest = next; next = fwdlink(next); } /* store global giving workspace size */ if (highest + blksize(highest) == memsize) wssize = highest; else wssize = memsize; /* there is a used block at the end */ /* set a version stamp for the workspace */ #ifdef DEBUG strcpy(wsstamp, systemname); strcat(wsstamp, nialversion); strcat(wsstamp, " (debug)"); #else strcpy(wsstamp, systemname); strcat(wsstamp, nialversion); #endif /* write out global structure */ testerr(writeblock(f1, (char *) &G, (long) sizeof G, false, 0L, 0)); /* loop to write out allocated blocks */ addr = membase; startaddr = addr; cnt = 0; while (addr < memsize) { if (allocated(addr)) { cnt += blksize(addr); addr += blksize(addr); } else { /* write out the block */ #ifdef DEBUG if (cnt == 0) { nprintf(OF_DEBUG, "second empty block in a row found in wsdump\n"); showfl(); nabort(NC_ABORT_F); } #endif /* nprintf(OF_DEBUG,"writing block startaddr %d cnt %d \n",startaddr,cnt); */ testerr(writeblock(f1, (char *) &cnt, (long) sizeof cnt, false, 0L, 0)); testerr(writeblock(f1, (char *) &startaddr, (long) sizeof addr, false, 0L, 0)); testerr(writeblock(f1, (char *) &mem[startaddr], (long) bytespu * cnt, false, 0L, 0)); /* prepare for next block after free space */ /* nprintf(OF_DEBUG,"skipping free block addr %d blksize %d\n",addr,blksize(addr)); */ addr = addr + blksize(addr); startaddr = addr; cnt = 0; } } if (startaddr != memsize) {/* there is a last block to write out */ /* nprintf(OF_DEBUG,"writing last block startaddr %d cnt %d \n",startaddr,cnt); 
*/ testerr(writeblock(f1, (char *) &cnt, (long) sizeof cnt, false, 0L, 0)); testerr(writeblock(f1, (char *) &startaddr, (long) sizeof addr, false, 0L, 0)); testerr(writeblock(f1, (char *) &mem[startaddr], (long) bytespu * cnt, false, 0L, 0)); } /* write final cnt == 0 to signal end of workspace */ cnt = 0; testerr(writeblock(f1, (char *) &cnt, (long) sizeof cnt, false, 0L, 0)); closefile(f1); return; fail: /* the testerr and testrderr macros branch here on an error */ nprintf(OF_NORMAL_LOG, errmsgptr); nprintf(OF_NORMAL_LOG, "workspace failed to write correctly\n"); closefile(f1); exit_cover(NC_WS_WRITE_ERR_W); }
// Decide which Hessian representation to use for the Laplace approximation
// (hesstype 2 = block diagonal, 3 = banded, 4 = full/sparse) and
// (re)allocate Hess/bHess/block-diagonal storage accordingly.  A trial
// evaluation of the user function with used_flags recording determines
// whether the random-effects Hessian is block diagonal; quadratic priors
// force hesstype 4 immediately.
// NOTE(review): in the full-matrix branch, "if (allocated(Hessadjoint)) {
// if (sparse_hessian_flag) { Hess.deallocate(); ..." deallocates Hess
// rather than Hessadjoint — looks like a copy-paste slip; confirm upstream
// before changing, as this code is kept byte-identical here.
// NOTE(review): "pmin->lapprox->num_separable_calls" references `pmin`
// although the parameter is `pfmin` — presumably a member or global;
// confirm it resolves as intended.
/** * Description not yet available. * \param */ void laplace_approximation_calculator:: check_hessian_type(function_minimizer * pfmin) { int ip = 0; if (quadratic_prior::get_num_quadratic_prior()>0) { hesstype=4; if (allocated(Hess)) { if (Hess.indexmax()!=usize) { Hess.deallocate(); Hess.allocate(1,usize,1,usize); } } else { Hess.allocate(1,usize,1,usize); } if (allocated(Hessadjoint)) { if (Hessadjoint.indexmax()!=usize) { Hessadjoint.deallocate(); Hessadjoint.allocate(1,usize,1,usize); } } else { Hessadjoint.allocate(1,usize,1,usize); } return; } else { int nv=initial_df1b2params::set_index(); if (allocated(used_flags)) { if (used_flags.indexmax() != nv) { used_flags.safe_deallocate(); } } if (!allocated(used_flags)) { used_flags.safe_allocate(1,nv); } //for (ip=1;ip<=num_der_blocks;ip++) { used_flags.initialize(); // do we need to reallocate memory for df1b2variables? check_for_need_to_reallocate(ip); df1b2_gradlist::set_no_derivatives(); //cout << re_objective_function_value::pobjfun << endl; //cout << re_objective_function_value::pobjfun->ptr << endl; (*re_objective_function_value::pobjfun)=0; df1b2variable pen=0.0; df1b2variable zz=0.0; initial_df1b2params::reset(y,pen); // call function to do block diagonal newton-raphson // the step vector from the newton-raphson is in the vector step df1b2_gradlist::set_no_derivatives(); funnel_init_var::lapprox=this; block_diagonal_flag=5; quadratic_prior::in_qp_calculations=1; if (sparse_hessian_flag) { // just to get the number of separable calls separable_calls_counter=0; pfmin->AD_uf_inner(); // allocate space for uncompressed sparse hessian information //num_separable_calls=separable_calls_counter; if (triplet_information==0) { triplet_information =new i3_array(1,separable_calls_counter); } else if ( triplet_information->indexmax() != separable_calls_counter) { delete triplet_information; triplet_information =new i3_array(1,separable_calls_counter); } triplet_information->initialize(); separable_calls_counter=0; } 
pfmin->pre_user_function(); if (sparse_hessian_flag) { // turn triplet_informaiton into compressed_triplet_information int mmin= triplet_information->indexmin(); int mmax= triplet_information->indexmax(); int ndim=0; for (int i=mmin;i<=mmax;i++) { if (allocated((*triplet_information)(i))) { ndim+=(*triplet_information)(i,1).indexmax(); } } if (compressed_triplet_information) { delete compressed_triplet_information; compressed_triplet_information=0; } compressed_triplet_information=new imatrix(1,ndim,1,3); (*compressed_triplet_information)(3).fill_seqadd(1,1); int ii=0; for (int i=mmin;i<=mmax;i++) { if (allocated((*triplet_information)(i))) { int jmin=(*triplet_information)(i,1).indexmin(); int jmax=(*triplet_information)(i,1).indexmax(); for (int j=jmin;j<=jmax;j++) { ii++; (*compressed_triplet_information)(ii,1)= (*triplet_information)(i,1,j); (*compressed_triplet_information)(ii,2)= (*triplet_information)(i,2,j); (*compressed_triplet_information)(ii,3)=ii; } } } imatrix & cti= *compressed_triplet_information; cti=sort(cti,1); int lmin=1; int lmax=0; for (int i=2;i<=ndim;i++) { if (cti(i,1)>cti(i-1,1)) { lmax=i-1; cti.sub(lmin,lmax)=sort(cti.sub(lmin,lmax),2); lmin=i; } } cti.sub(lmin,ndim)=sort(cti.sub(lmin,ndim),2); imatrix tmp=trans(cti); delete compressed_triplet_information; compressed_triplet_information=new imatrix(tmp); } quadratic_prior::in_qp_calculations=0; int non_block_diagonal=0; for (int i=xsize+1;i<=xsize+usize;i++) { if (used_flags(i)>1) { non_block_diagonal=1; break; } } if (non_block_diagonal) { if (bw< usize/2 && sparse_hessian_flag==0) { hesstype=3; //banded if (bHess) { if (bHess->bandwidth() !=bw) { delete bHess; bHess = new banded_symmetric_dmatrix(1,usize,bw); if (bHess==0) { cerr << "Error allocating banded_symmetric_dmatrix" << endl; ad_exit(1); } } } else { bHess = new banded_symmetric_dmatrix(1,usize,bw); if (bHess==0) { cerr << "Error allocating banded_symmetric_dmatrix" << endl; ad_exit(1); } } if (bHessadjoint) { if 
(bHessadjoint->bandwidth() !=bw) { delete bHessadjoint; bHessadjoint = new banded_symmetric_dmatrix(1,usize,bw); if (bHessadjoint==0) { cerr << "Error allocating banded_symmetric_dmatrix" << endl; ad_exit(1); } } } else { bHessadjoint = new banded_symmetric_dmatrix(1,usize,bw); if (bHessadjoint==0) { cerr << "Error allocating banded_symmetric_dmatrix" << endl; ad_exit(1); } } } else { //check_sparse_matrix_structure(); hesstype=4; // band is so wide so use full matrix if (bHess) { delete bHess; bHess=0; } if (bHessadjoint) { delete bHessadjoint; bHessadjoint=0; } if (allocated(Hess)) { if (sparse_hessian_flag) { Hess.deallocate(); } else { if (Hess.indexmax() != usize) { Hess.deallocate(); Hess.allocate(1,usize,1,usize); } } } else { if (sparse_hessian_flag==0) Hess.allocate(1,usize,1,usize); } if (sparse_hessian_flag) { make_sparse_triplet(); } if (allocated(Hessadjoint)) { if (sparse_hessian_flag) { Hess.deallocate(); } else { if (Hessadjoint.indexmax() != usize) { Hessadjoint.deallocate(); Hessadjoint.allocate(1,usize,1,usize); } } } else { if (sparse_hessian_flag==0) Hessadjoint.allocate(1,usize,1,usize); } } } else { hesstype=2; } if (hesstype==2 && num_importance_samples>0) { if (importance_sampling_components) { delete importance_sampling_components; importance_sampling_components=0; } importance_sampling_components= new dvar_matrix(1,pmin->lapprox->num_separable_calls, 1,num_importance_samples); } if (hesstype==2 && (num_importance_samples>0 || use_gauss_hermite>0)) { const ivector & itmp=(*num_local_re_array)(1,num_separable_calls); const ivector & itmpf=(*num_local_fixed_array)(1,num_separable_calls); // **************************************************** // **************************************************** if (antiflag>0) { // generate antithetical rv's generate_antithetical_rvs(); } if (use_gauss_hermite>0) { if (gh) { delete gh; gh=0; } gh=new gauss_hermite_stuff(this,use_gauss_hermite, num_separable_calls,itmp); } if (block_diagonal_vch) { delete 
block_diagonal_vch; block_diagonal_vch=0; } block_diagonal_vch = new dvar3_array(1,num_separable_calls, 1,itmp,1,itmp); if (block_diagonal_ch) { delete block_diagonal_ch; block_diagonal_ch=0; } block_diagonal_ch = new d3_array(1,num_separable_calls, 1,itmp,1,itmp); if (block_diagonal_hessian) { delete block_diagonal_hessian; block_diagonal_hessian=0; } block_diagonal_hessian = new d3_array(1,num_separable_calls, 1,itmp,1,itmp); if (block_diagonal_hessian ==0) { cerr << "error_allocating d3_array" << endl; ad_exit(1); } if (block_diagonal_re_list) { delete block_diagonal_re_list; block_diagonal_re_list = 0; } block_diagonal_re_list = new imatrix(1,num_separable_calls, 1,itmp); if (block_diagonal_re_list == 0) { cerr << "error_allocating imatrix" << endl; ad_exit(1); } if (block_diagonal_fe_list) { delete block_diagonal_fe_list; block_diagonal_fe_list = 0; } block_diagonal_fe_list = new imatrix(1,num_separable_calls, 1,itmpf); if (block_diagonal_fe_list ==0) { cerr << "error_allocating imatrix" << endl; ad_exit(1); } // **************************************************** if (block_diagonal_Dux) { delete block_diagonal_Dux; block_diagonal_Dux=0; } block_diagonal_Dux = new d3_array(1,num_separable_calls, 1,itmp,1,itmpf); if (block_diagonal_Dux ==0) { cerr << "error_allocating d3_array" << endl; ad_exit(1); } // **************************************************** // **************************************************** if (block_diagonal_vhessian) { delete block_diagonal_vhessian; block_diagonal_vhessian=0; } block_diagonal_vhessian = new dvar3_array(1,num_separable_calls, 1,itmp,1,itmp); if (block_diagonal_vhessian ==0) { cerr << "error_allocating d3_array" << endl; ad_exit(1); } if (block_diagonal_vhessianadjoint) { delete block_diagonal_vhessianadjoint; block_diagonal_vhessianadjoint=0; } block_diagonal_vhessianadjoint = new d3_array(1,num_separable_calls, 1,itmp,1,itmp); if (block_diagonal_vhessianadjoint ==0) { cerr << "error_allocating d3_array" << endl; 
ad_exit(1); } } funnel_init_var::lapprox=0; block_diagonal_flag=0; pen.deallocate(); } } }
// Gauss-Hermite integration over block-diagonal random effects with
// multiple random effects per separable call: packs x, u0 and the
// per-block Hessians into one independent-variable vector, Cholesky-
// factors each block's inverse Hessian, enumerates the multi-index grid
// of Hermite nodes (pmin->lapprox->gh->mi) evaluating the model at each
// node, then adds the weighted 0.5*log-det terms and back-propagates
// gradients into xadjoint/uadjoint and block_diagonal_vhessianadjoint.
// Returns the resulting objective value.
// NOTE(review): this chunk's line breaks fall inside comments (e.g. the
// "// put uhat back into the model" split between chunks) — the code is
// kept byte-identical; confirm the physical line structure in the
// original file before reflowing.
/** * Description not yet available. * \param */ double do_gauss_hermite_block_diagonal_multi(const dvector& x, const dvector& u0,const dmatrix& Hess,const dvector& _xadjoint, const dvector& _uadjoint,const dmatrix& _Hessadjoint, function_minimizer * pmin) { ADUNCONST(dvector,xadjoint) ADUNCONST(dvector,uadjoint) //ADUNCONST(dmatrix,Hessadjoint) dvector & w= *(pmin->multinomial_weights); const int xs=x.size(); const int us=u0.size(); gradient_structure::set_NO_DERIVATIVES(); int nsc=pmin->lapprox->num_separable_calls; const ivector lrea = (*pmin->lapprox->num_local_re_array)(1,nsc); int hroom = sum(square(lrea)); int nvar=x.size()+u0.size()+hroom; independent_variables y(1,nvar); // need to set random effects active together with whatever // init parameters should be active in this phase initial_params::set_inactive_only_random_effects(); initial_params::set_active_random_effects(); /*int onvar=*/initial_params::nvarcalc(); initial_params::xinit(y); // get the initial values into the // do we need this next line? 
y(1,xs)=x; int i,j; // contribution for quadratic prior if (quadratic_prior::get_num_quadratic_prior()>0) { //Hess+=quadratic_prior::get_cHessian_contribution(); int & vxs = (int&)(xs); quadratic_prior::get_cHessian_contribution(Hess,vxs); } // Here need hooks for sparse matrix structures dvar3_array & block_diagonal_vhessian= *pmin->lapprox->block_diagonal_vhessian; block_diagonal_vhessian.initialize(); dvar3_array& block_diagonal_ch= *pmin->lapprox->block_diagonal_vch; //dvar3_array(*pmin->lapprox->block_diagonal_ch); int ii=xs+us+1; d3_array& bdH=(*pmin->lapprox->block_diagonal_hessian); int ic; for (ic=1;ic<=nsc;ic++) { int lus=lrea(ic); for (i=1;i<=lus;i++) for (j=1;j<=lus;j++) y(ii++)=bdH(ic)(i,j); } dvector g(1,nvar); gradcalc(0,g); gradient_structure::set_YES_DERIVATIVES(); dvar_vector vy=dvar_vector(y); //initial_params::stddev_vscale(d,vy); ii=xs+us+1; if (initial_df1b2params::have_bounded_random_effects) { cerr << "can't do importance sampling with bounded random effects" " at present" << endl; ad_exit(1); } else { for (int ic=1;ic<=nsc;ic++) { int lus=lrea(ic); if (lus>0) { for (i=1;i<=lus;i++) { for (j=1;j<=lus;j++) { block_diagonal_vhessian(ic,i,j)=vy(ii++); } } block_diagonal_ch(ic)= choleski_decomp(inv(block_diagonal_vhessian(ic))); } } } int nsamp=pmin->lapprox->use_gauss_hermite; pmin->lapprox->in_gauss_hermite_phase=1; dvar_vector sample_value(1,nsamp); sample_value.initialize(); dvar_vector tau(1,us);; // !!! This only works for one random efect in each separable call // at present. 
if (pmin->lapprox->gh->mi) { delete pmin->lapprox->gh->mi; pmin->lapprox->gh->mi=0; } pmin->lapprox->gh->mi=new multi_index(1,nsamp, pmin->lapprox->multi_random_effects); multi_index & mi = *(pmin->lapprox->gh->mi); //for (int is=1;is<=nsamp;is++) dvector& xx=pmin->lapprox->gh->x; do { int offset=0; pmin->lapprox->num_separable_calls=0; //pmin->lapprox->gh->is=is; for (ic=1;ic<=nsc;ic++) { int lus=lrea(ic); // will need vector stuff here when more than one random effect if (lus>0) { //tau(offset+1,offset+lus).shift(1)=block_diagonal_ch(ic)(1,1)* // pmin->lapprox->gh->x(is); dvector xv(1,lus); for (int iu=1;iu<=lus;iu++) { xv(iu)= xx(mi()(iu)); } tau(offset+1,offset+lus).shift(1)=block_diagonal_ch(ic)*xv; offset+=lus; } } // have to reorder the terms to match the block diagonal hessian imatrix & ls=*(pmin->lapprox->block_diagonal_re_list); int mmin=ls.indexmin(); int mmax=ls.indexmax(); int ii=1; int i; for (i=mmin;i<=mmax;i++) { int cmin=ls(i).indexmin(); int cmax=ls(i).indexmax(); for (int j=cmin;j<=cmax;j++) { vy(ls(i,j))+=tau(ii++); } } if (ii-1 != us) { cerr << "error in interface" << endl; ad_exit(1); } initial_params::reset(vy); // get the values into the model ii=1; for (i=mmin;i<=mmax;i++) { int cmin=ls(i).indexmin(); int cmax=ls(i).indexmax(); for (int j=cmin;j<=cmax;j++) { vy(ls(i,j))-=tau(ii++); } } *objective_function_value::pobjfun=0.0; pmin->AD_uf_outer(); ++mi; } while(mi.get_depth()<=pmin->lapprox->multi_random_effects); nsc=pmin->lapprox->num_separable_calls; dvariable vf=pmin->do_gauss_hermite_integration(); int sgn=0; dvariable ld=0.0; if (ad_comm::no_ln_det_choleski_flag) { for (int ic=1;ic<=nsc;ic++) { if (allocated(block_diagonal_vhessian(ic))) { ld+=w(2*ic)*ln_det(block_diagonal_vhessian(ic),sgn); } } ld*=0.5; } else { for (int ic=1;ic<=nsc;ic++) { if (allocated(block_diagonal_vhessian(ic))) { ld+=w(2*ic)*ln_det_choleski(block_diagonal_vhessian(ic)); } } ld*=0.5; } vf+=ld; //vf+=us*0.91893853320467241; double f=value(vf); gradcalc(nvar,g); // 
put uhat back into the model gradient_structure::set_NO_DERIVATIVES(); vy(xs+1,xs+us).shift(1)=u0; initial_params::reset(vy); // get the values into the model gradient_structure::set_YES_DERIVATIVES(); pmin->lapprox->in_gauss_hermite_phase=0; ii=1; for (i=1;i<=xs;i++) xadjoint(i)=g(ii++); for (i=1;i<=us;i++) uadjoint(i)=g(ii++); for (ic=1;ic<=nsc;ic++) { int lus=lrea(ic); for (i=1;i<=lus;i++) { for (j=1;j<=lus;j++) { (*pmin->lapprox->block_diagonal_vhessianadjoint)(ic)(i,j)=g(ii++); } } } return f; }
// Render the allocated() bytes of the buffer as a hex string; when ascii
// is true, gnash::hexify additionally shows printable bytes as characters.
std::string Buffer::hexify (bool ascii) { return gnash::hexify(_data.get(), allocated(), ascii); }
void copyBeginning(const denseFlagVector<T> &v, int n){ assert(allocated() && len >= v.length()); copy(v.d,v.d+v.length(),d); }
// Generate antithetical random vectors for importance sampling: builds a
// discretized chi-like radius distribution `dist`, fills antiepsilon with
// normalized Gaussian directions, then optimizes the directions (fmmt1
// quasi-Newton on fcomp1) before rescaling each row to its target radius.
// Requires all separable calls to have the same number of random effects.
// NOTE(review): the guard "if (antiepsilon) { if (allocated(*antiepsilon))
// { delete ...; } }" leaks antiepsilon when it is non-null but its matrix
// is unallocated — the pointer is overwritten by `new` just below without
// being deleted on that path; confirm and fix upstream.
// NOTE(review): "double fbest=1.e+50;;" has a stray second semicolon
// (harmless empty statement).  Code kept byte-identical here.
/** * Description not yet available. * \param */ void laplace_approximation_calculator::generate_antithetical_rvs() { // number of random vectors const ivector & itmp=(*num_local_re_array)(1,num_separable_calls); //const ivector & itmpf=(*num_local_fixed_array)(1,num_separable_calls); for (int i=2;i<=num_separable_calls;i++) { if (itmp(i) != itmp(i-1)) { cerr << "At present can only use antithetical rv's when " "all separable calls are the same size" << endl; ad_exit(1); } } int n=itmp(1); int samplesize=num_importance_samples; // mesh size double delta=0.01; // maximum of distribution is near here double mid=sqrt(double(n-1)); dmatrix weights(1,2*n,1,2); double spread=15; if (mid-spread<=0.001) spread=mid-0.1; double ssum=0.0; double x=0.0; double tmax=(n-1)*log(mid)-0.5*mid*mid; for (x=mid-spread;x<=mid+spread;x+=delta) { ssum+=exp((n-1)*log(x)-0.5*x*x-tmax); } double tsum=0; dvector dist(1,samplesize+1); dist.initialize(); int is=0; int ii; for (x=mid-spread;x<=mid+spread;x+=delta) { tsum+=exp((n-1)*log(x)-0.5*x*x-tmax)/ssum*samplesize; int ns=int(tsum); for (ii=1;ii<=ns;ii++) { dist(++is)=x; } tsum-=ns; } if (is==samplesize-1) { dist(samplesize)=mid; } else if (is<samplesize-1) { cerr << "This can't happen" << endl; exit(1); } // get random numbers random_number_generator rng(rseed); if (antiepsilon) { if (allocated(*antiepsilon)) { delete antiepsilon; antiepsilon=0; } } antiepsilon=new dmatrix(1,samplesize,1,n); dmatrix & M=*antiepsilon; M.fill_randn(rng); for (int i=1;i<=samplesize;i++) { M(i)=M(i)/norm(M(i)); } int nvar=(samplesize-1)*n; independent_variables xx(1,nvar); ii=0; for (int i=2;i<=samplesize;i++) { for (int j=1;j<=n;j++) { xx(++ii)=M(i,j); } } fmmt1 fmc(nvar,5); //fmm fmc(nvar,5); fmc.noprintx=1; fmc.iprint=10; fmc.maxfn=2500; fmc.crit=1.e-6; double f; double fbest=1.e+50;; dvector g(1,nvar); dvector gbest(1,nvar); dvector xbest(1,nvar); gbest.fill_seqadd(1.e+50,0.); { while (fmc.ireturn>=0) { //int badflag=0; fmc.fmin(f,xx,g); if (fmc.ihang) { 
//int hang_flag=fmc.ihang; //double maxg=max(g); //double ccrit=fmc.crit; //int current_ifn=fmc.ifn; } if (fmc.ireturn>0) { f=fcomp1(xx,dist,samplesize,n,g,M); if (f < fbest) { fbest=f; gbest=g; xbest=xx; } } } xx=xbest; } ii=0; for (int i=2;i<=samplesize;i++) { for (int j=1;j<=n;j++) { M(i,j)=xx(++ii); } } for (int i=1;i<=samplesize;i++) { M(i)*=dist(i)/norm(M(i)); } }
void copyToPosition(const T *b, int start, int n){ assert(allocated() && len >= start+n); copy(b,b+n,&d[start]); }
// Convert each byte into its hex representation std::string Buffer::hexify () { return gnash::hexify(_data.get(), allocated(), false); }