/// Look up a registered task by its identifier.
/// Returns a pointer to the matching Scheduler_task, or NULL when no
/// task with the given id is registered.
Scheduler_task* Scheduler::get_task_by_id(uint16_t task_id)
{
    Scheduler_task* found = NULL;
    uint32_t index = 0;
    while ((index < task_count_) && (found == NULL)) {
        if (tasks()[index].task_id == task_id) {
            found = &tasks()[index];
        }
        index++;
    }
    return found;
}
/// Trigger immediate execution of every registered task.
void Scheduler::run_all_tasks_now(void)
{
    uint32_t index = 0;
    while (index < task_count_) {
        tasks()[index].run_now();
        ++index;
    }
}
int main(int argc, const char *argv[]) { po::options_description desc("options"); desc.add_options() ("nthreads", po::value<long>(), "") ("times", po::value<long>(), "") ("tasksize", po::value<long>(), "") ; po::variables_map args; po::store(po::parse_command_line(argc, argv, desc), args); po::notify(args); long nthreads = args["nthreads"].as<long>(); long times = args["times"].as<long>(); long tasksize = args["tasksize"].as<long>(); Threading::setup(nthreads); boost::posix_time::ptime startTime = boost::posix_time::microsec_clock::universal_time(); for (long n = 0; n < times; n++) { std::vector<boost::function0<void> > tasks(nthreads); for (long i = 0; i < nthreads; i++) { tasks[i] = boost::bind(runFact, tasksize / nthreads); } Threading::getInstance().scheduleTasks(tasks); } boost::posix_time::ptime endTime = boost::posix_time::microsec_clock::universal_time(); std::cout << (endTime - startTime).total_milliseconds() << std::endl; return 0; }
std::vector<size_t> solve_ap_rec(const std::vector< std::vector<type> >& costs) { assert(costs.size() == costs[0].size()); // minimize over all possible assignment-paths recursively // single root-leaf traversal has O(N ^ 3) complexity, but // the entire tree has exponentially many leafs // // A = (a1 a2 a3) // T = (t1 t2 t3) // C = MIN( // SUM(COST(a1,t1), COST(a2,t2), COST(a3,t3)), // SUM(COST(a1,t1), COST(a2,t3), COST(a3,t2)), // SUM(COST(a1,t2), COST(a2,t1), COST(a3,t3)), // SUM(COST(a1,t2), COST(a2,t3), COST(a3,t1)), // SUM(COST(a1,t3), COST(a2,t2), COST(a3,t1)), // SUM(COST(a1,t3), COST(a2,t1), COST(a3,t2)), // ) // type min_cost = std::numeric_limits<type>::max(); std::vector<size_t> agnts(costs.size(), -1lu); std::vector<size_t> tasks(costs.size(), -1lu); std::vector<size_t> pairs(costs.size(), -1lu); solve_ap_rec(costs, agnts, tasks, pairs, 0, &min_cost); return pairs; }
/*
 * Per-connection service thread.
 *
 * Copies the connection fd out of the shared slot (under the lock, so the
 * acceptor can reuse the slot), detaches itself, then loops: receive a
 * command, NUL-terminate it and hand it to tasks() until the peer hangs up,
 * recv() fails, or tasks() reports an error.  Always closes the fd.
 */
void *serv_thread(void *vargp) {
    int res;
    char buf[SHORT_BUF];
    int connfd;

    Pthread_mutex_lock(&psyc);
    connfd = *(int *)vargp;
    Pthread_mutex_unlock(&psyc);
    Pthread_detach(pthread_self());
#ifdef DEBUG
    printf("one service thread is created...\n");
#endif
    for (;;) {
        /* Receive at most SHORT_BUF-1 bytes so the NUL terminator below
         * stays in bounds.  The original recv'd a full SHORT_BUF and then
         * wrote buf[SHORT_BUF] -- a one-byte stack overflow. */
        res = recv(connfd, buf, SHORT_BUF - 1, 0);
        if (res == -1) {
            delay_sys("fail to recv\n");
            break;
        } else if (res == 0) {
            delay_sys("the connect broken\n");
            break;
        }
        buf[res] = 0;
        printf("Have received the command : %s\n", buf);
        if (tasks(buf, connfd) < 0)
            break;
    }
    printf("client %d has left...\n", connfd);
    close(connfd);
    return NULL;
}
std::vector<Task*> SyrkProblem::split() { double* C11 = C; double* C12 = C + ldc*n/2; double* C21 = C + n/2; double* C22 = C + ldc*n/2 + n/2; double* A11 = A; double* A12 = A + lda*n/2; double* A21 = A + n/2; double* A22 = A + lda*n/2 + n/2; double *C11_2 = (double*) malloc(n * n / 4 * sizeof(double)); double *C21_2 = (double*) malloc(n * n / 4 * sizeof(double)); double *C22_2 = (double*) malloc(n * n / 4 * sizeof(double)); memset(C11_2, 0, n * n / 4 * sizeof(double)); memset(C21_2, 0, n * n / 4 * sizeof(double)); memset(C22_2, 0, n * n / 4 * sizeof(double)); std::vector<Task*> tasks (6); tasks[0] = new Task(new SyrkProblem(C11, A11, n/2, ldc, lda)); tasks[1] = new Task(new SyrkProblem(C11_2, A12, n/2, n/2, lda)); tasks[2] = new Task(new MultProblem(C21, A21, A11, n/2, ldc, lda, lda)); tasks[3] = new Task(new MultProblem(C21_2, A22, A12, n/2, n/2, lda, lda)); tasks[4] = new Task(new SyrkProblem(C22, A21, n/2, ldc, lda)); tasks[5] = new Task(new SyrkProblem(C22_2, A22, n/2, n/2, lda)); return tasks; }
/*!
 * @brief Print the details of all registered tasks.
 */
void operator() ()
{
    w32::ts::Tasks tasks(myScheduler);
    w32::string name;
    while (tasks.next(name)) {
        (*this)(name);
    }
}
/// Ask every registered task to suspend itself for `delay`.
void Scheduler::suspend_all_tasks(uint32_t delay)
{
    uint32_t index = 0;
    while (index < task_count_) {
        tasks()[index].suspend(delay);
        ++index;
    }
}
std::vector<Task*> MergesortProblem::split() { int midpoint = length/2; std::vector<Task*> tasks (2); tasks[0] = new Task(new MergesortProblem(A, midpoint)); tasks[1] = new Task(new MergesortProblem(A + midpoint, midpoint + (length % 2))); return tasks; }
std::vector<Task*> TrsmProblem::split() { double* X11 = X; double* X12 = X + N*n/2; double* X21 = X + n/2; double* X22 = X + N*n/2 + n/2; double* T11 = T; double* T12 = T + N*n/2; double* T21 = T + n/2; double* T22 = T + N*n/2 + n/2; Task* task1 = new Task(3); task1->addProblem(new TrsmProblem(X11, T11, n/2, N)); task1->addProblem(new MultProblem(X12, X11, T21, n/2, N, N, N)); task1->addProblem(new TrsmProblem(X12, T22, n/2, N)); Task* task2 = new Task(3); task2->addProblem(new TrsmProblem(X21, T11, n/2, N)); task2->addProblem(new MultProblem(X22, X21, T21, n/2, N, N, N)); task2->addProblem(new TrsmProblem(X22, T22, n/2, N)); std::vector<Task*> tasks (2); tasks[0] = task1; tasks[1] = task2; return tasks; }
int main(void) { xyInit(); pidInit(); motorInit(); orientationInit(); debugPrint("Initialized Hardware"); addTask(&flightTask); addTask(&statusTask); addMenuCommand('m', motorToggleString, &motorToggle); addMenuCommand('w', motorForwardString, &motorForward); addMenuCommand('a', motorLeftString, &motorLeft); addMenuCommand('s', motorBackwardString, &motorBackward); addMenuCommand('d', motorRightString, &motorRight); addMenuCommand('x', motorUpString, &motorUp); addMenuCommand('y', motorDownString, &motorDown); addMenuCommand('p', controlToggleString, &controlToggle); addMenuCommand('n', parameterChangeString, ¶meterChange); addMenuCommand('z', zeroString, &zeroOrientation); addMenuCommand('o', silentString, &silent); addMenuCommand('r', sensorString, &printRaw); xyLed(LED_RED, LED_OFF); xyLed(LED_GREEN, LED_ON); debugPrint("Starting Tasks"); for(;;) { tasks(); } return 0; }
/// Return the task stored at `task_index`, or NULL when the index is
/// out of range.
Scheduler_task* Scheduler::get_task_by_index(uint16_t task_index)
{
    return (task_index < task_count_) ? &(tasks()[task_index]) : NULL;
}
bool Scheduler::sort_tasks(void) { bool sorted = false; if (task_count_ < 2) { sorted = true; return sorted; } while (sorted == false) { sorted = true; // Iterate through registered tasks for (uint32_t i = 0; i < (task_count_ - 1); i++) { if (tasks()[i].priority < tasks()[i + 1].priority) { // Task i has lower priority than task i+1 -> need swap sorted = false; } else if (tasks()[i].priority == tasks()[i + 1].priority) { if (tasks()[i].repeat_period > tasks()[i + 1].repeat_period) { // Tasks i and i+1 have equal priority, but task i has higher // repeat period than task i+1 -> need swap sorted = false; } } // Swap tasks i and i+1 if necessary if (sorted == false) { Scheduler_task tmp(tasks()[i]); tasks()[i] = tasks()[i + 1]; tasks()[i + 1] = tmp; sorted = false; } } } return sorted; }
/**
 * Run one scheduling pass.
 *
 * Starting from current_schedule_slot_, scan the task array for the first
 * task that is due, execute it, advance the slot according to the
 * configured strategy, and return.  At most one task runs per call.
 *
 * @return number of realtime violations observed in this pass (0 or 1:
 *         incremented when the executed task's execute() returns false).
 */
int32_t Scheduler::update(void) {
    int32_t realtime_violation = 0;
    // Iterate through registered tasks
    if (task_count_ > 0) {
        uint32_t i = current_schedule_slot_;
        do {
            // If the task is active and has waited long enough...
            if (tasks()[i].is_due()) {
                // Execute task
                if (!tasks()[i].execute()) {
                    realtime_violation++; //realtime violation!!
                }
                // Depending on scheduling strategy, select next task slot
                switch (schedule_strategy_) {
                case FIXED_PRIORITY:
                    // Fixed priority scheme - scheduler will start over with tasks with the highest priority
                    current_schedule_slot_ = 0;
                    break;
                case ROUND_ROBIN:
                    // Round robin scheme - scheduler will pick up where it left.
                    current_schedule_slot_ = (current_schedule_slot_+1)%task_count_;
                    break;
                }
                // One task per update() call.
                return realtime_violation;
            }
            i = (i+1)%task_count_;
        } while(i != current_schedule_slot_);
    }
    return realtime_violation;
}
/**
 * Parse a cocaine node "app" JSON object into app_info.
 *
 * Requires a non-empty "drivers" object; each driver that parses
 * successfully is added to app_info.tasks (failures are skipped, not
 * fatal).  Also fills queue depth, status (from "state") and, when a
 * "slaves" object is present, the busy/total slave counts.
 *
 * @return false only when "drivers" is missing, not an object, or empty.
 */
bool parse_app_info(const Json::Value& json_app_data, cocaine_node_app_info_t& app_info) {
    // parse drivers
    Json::Value tasks(json_app_data["drivers"]);
    if (!tasks.isObject() || !tasks.size()) {
        return false;
    }
    Json::Value::Members tasks_names(tasks.getMemberNames());
    for (Json::Value::Members::iterator it = tasks_names.begin(); it != tasks_names.end(); ++it) {
        std::string task_name(*it);
        Json::Value task(tasks[task_name]);
        // Skip malformed or empty driver entries rather than failing.
        if (!task.isObject() || !task.size()) {
            continue;
        }
        cocaine_node_task_info_t task_info(task_name);
        if (!parse_task_info(task, task_info)) {
            continue;
        } else {
            app_info.tasks[task_name] = task_info;
        }
    }
    // parse remaining properties
    app_info.queue_depth = json_app_data.get("queue-depth", 0).asInt();
    // Map the textual state onto the status enum; anything else is UNKNOWN.
    std::string state = json_app_data.get("state", "").asString();
    if (state == "running") {
        app_info.status = APP_STATUS_RUNNING;
    } else if (state == "stopping") {
        app_info.status = APP_STATUS_STOPPING;
    } else if (state == "stopped") {
        app_info.status = APP_STATUS_STOPPED;
    } else {
        app_info.status = APP_STATUS_UNKNOWN;
    }
    // Slave counts are optional; defaults stay untouched when absent.
    const Json::Value slaves_props = json_app_data["slaves"];
    if (slaves_props.isObject()) {
        app_info.slaves_busy = slaves_props.get("busy", 0).asInt();
        app_info.slaves_total = slaves_props.get("total", 0).asInt();
    }
    return true;
}
/**
 * Distribute the fragments of cf_bunch across the pool's worker threads
 * round-robin and hand each non-empty bunch to its thread.
 *
 * NOTE(review): `thread` is function-static, so the round-robin cursor
 * persists across calls; this is not safe if scheduleCFs() can be entered
 * concurrently -- confirm the caller serialises it.
 * NOTE(review): the closing brace of this function appears to be missing
 * in this chunk of the file.
 */
void CFScheduler::scheduleCFs(const CompFragmentBunch& cf_bunch) {
    static int thread = 0;
    // Single-threaded pool: no need to split the bunch.
    if (thread_pool->getNumOfThreads() == 1) {
        thread_pool->execCFs(cf_bunch, 0);
    } else {
        // One bunch per worker thread; deal fragments out round-robin.
        std::vector<CompFragmentBunch> tasks(thread_pool->getNumOfThreads());
        BOOST_FOREACH(CompFragment *cf, cf_bunch) {
            tasks[thread].add(cf);
            thread = (thread + 1) % thread_pool->getNumOfThreads();
        }
        // Dispatch only the bunches that received at least one fragment.
        for (size_t i = 0; i < tasks.size(); i++)
            if (tasks[i].size() > 0)
                thread_pool->execCFs(tasks[i], i);
    }
/*
 * Hardware-test firmware entry point: initialize the board, register the
 * LED blink task and the test menu commands, then run the cooperative
 * task loop forever.
 */
int main(void) {
    /*
     * Initialize the System Timer, UART, TWI, SPI,
     * ADC and the UART menu task for user or software
     * interaction. Also enables interrupts!
     * Also, the UART will be tied to stdin, stdout and stderr.
     * This allows you to use stdio.h utilities like printf()
     */
    xyInit();
    printf("Initializing Hardware Test...\n");

    /*
     * Initialize Hardware
     */
    xyLed(LED_GREEN, LED_OFF);
    xyLed(LED_RED, LED_ON);
    motorInit();
    orientationInit();

    /*
     * Register Tasks in the Scheduler. A UART task
     * is already registered...
     */
    addTask(&ledTask); // Blink LED

    /*
     * Add commands for the UART menu (key, help string, handler).
     */
    addMenuCommand('b', bluetoothString, &bluetoothTest);
    addMenuCommand('r', sensorString, &printRaw);
    addMenuCommand('t', ramString, &ramTest);
    addMenuCommand('v', voltageString, &printVoltage);
    printf("Hardware Test Initialized!\n");

    /*
     * Execute all registered tasks, forever.
     */
    for(;;) {
        tasks();
    }
    return 0;
}
/**
 * Flatten `vec` into one contiguous vector, count value frequencies in
 * parallel across hardware threads, and print the count for value 277.
 *
 * Fix: std::thread::hardware_concurrency() may return 0 when the core
 * count cannot be determined; the original then created an empty task
 * vector and divided by tasks.size() (division by zero).  The count is
 * now clamped to at least 1.
 */
void extend(const std::vector<std::vector<long>>& vec) {
    // Pre-size the flattened buffer to avoid repeated reallocation.
    unsigned long total_size = 0;
    for(const auto& inner : vec)
        total_size += inner.size();
    std::vector<long> extended;
    extended.reserve(total_size);
    for(const auto& inner : vec)
        extended.insert(extended.end(),inner.begin(),inner.end());

    unsigned int num_cores = std::thread::hardware_concurrency();
    if (num_cores == 0)
        num_cores = 1;  // hardware_concurrency() may legitimately return 0
    std::vector<std::future<std::unordered_map<long,unsigned int>>> tasks(num_cores);
    // Each async task handles `work` consecutive elements.
    const int work = extended.size() / tasks.size();
    for(unsigned int i = 0; i < tasks.size(); ++i) {
        // std::async already returns an rvalue; no std::move needed.
        tasks[i] = std::async(std::launch::async,do_work,i,work,std::cref(extended));
    }

    std::unordered_map<long,unsigned int> results;
    // do the rest of the work (the uneven tail) on the main thread
    for(unsigned int i = tasks.size() * work; i < extended.size(); ++i) {
        ++results[extended[i]];
    }
    // Merge the per-task partial counts into the final tally.
    for(auto& task : tasks) {
        std::unordered_map<long,unsigned int> task_res = task.get();
        for(const auto& value : task_res) {
            results[value.first] += value.second;
        }
    }
    std::cout << results[277] << "\n";
}
/*
 * Quantize a batch of probes in parallel.
 *
 * squared_distances holds num_probes blocks of num_points entries each
 * (enforced by the asserts below); likes must be sized to match.  The
 * probe range is processed with tbb::parallel_reduce and the reduced
 * statistics are returned.
 *
 * NOTE(review): the function-local static `init` lazily initializes the
 * TBB scheduler on first call and keeps it alive for the process.
 */
QuantizeStats quantize_batch (
    float radius,
    const Vector<float> & squared_distances,
    Vector<float> & likes,
    size_t num_probes)
{
    ASSERT_DIVIDES(num_probes, squared_distances.size);
    const size_t num_points = squared_distances.size / num_probes;
    ASSERT_SIZE(likes, num_probes * num_points);

    QuantizeBatchData data = { radius, num_points, & squared_distances, & likes};
    QuantizeBatch tasks(data);
    static tbb::task_scheduler_init init;
    tbb::blocked_range<size_t> range(0, num_probes);
    tbb::parallel_reduce(range, tasks);
    return tasks.stats;
}
/**
 * Count value frequencies across the nested vectors in parallel --
 * without flattening -- and print the count for value 277.  Work is split
 * by outer index: each async task handles `work` consecutive inner
 * vectors; the main thread consumes the uneven tail.
 *
 * Fix: std::thread::hardware_concurrency() may return 0; the original
 * then divided by tasks.size() (division by zero).  Clamped to >= 1.
 */
void no_extend(const std::vector<std::vector<long>>& vec) {
    unsigned int num_cores = std::thread::hardware_concurrency();
    if (num_cores == 0)
        num_cores = 1;  // hardware_concurrency() may legitimately return 0
    std::vector<std::future<std::unordered_map<long,unsigned int>>> tasks(num_cores);
    const int work = vec.size() / tasks.size();
    for(unsigned int i = 0; i < tasks.size(); ++i) {
        // std::async already returns an rvalue; no std::move needed.
        tasks[i] = std::async(std::launch::async,do_work2,i,work,std::cref(vec));
    }
    std::unordered_map<long,unsigned int> results;
    // do the rest of the work on the main thread
    auto begin_iter = std::next(vec.begin(),tasks.size()*work);
    std::for_each(begin_iter,vec.end(),[&results](const std::vector<long>& inner) {
        for(long val : inner) {
            ++results[val];
        }
    });
    // Merge the per-task partial counts into the final tally.
    for(auto& task : tasks) {
        std::unordered_map<long,unsigned int> task_res = task.get();
        for(const auto& value : task_res) {
            results[value.first] += value.second;
        }
    }
    std::cout << results[277] << "\n";
}
/*
 * Run the VQ construct-derivative pass over all probes in parallel.
 *
 * Sizes (all enforced by the asserts below): probes holds num_probes
 * blocks of `dim` bytes; recons is dim * num_probes; work is twice that;
 * points holds num_points blocks of `dim`; likes and squared_distances
 * are num_points * num_probes.  The probe range is processed with
 * tbb::parallel_reduce and the reduced statistics are returned.
 *
 * NOTE(review): the function-local static `init` lazily initializes the
 * TBB scheduler on first call and keeps it alive for the process.
 */
ConstructStats vq_construct_deriv (
    const Vector<float> & likes,
    const Vector<float> & squared_distances,
    const Vector<uint8_t> & probes,
    const Vector<uint8_t> & points,
    Vector<uint8_t> & recons,
    Vector<float> & work,
    float tol,
    size_t num_probes)
{
    ASSERT_DIVIDES(num_probes, probes.size);
    const size_t dim = probes.size / num_probes;
    ASSERT_SIZE(recons, dim * num_probes);
    ASSERT_SIZE(work, 2 * dim * num_probes);
    ASSERT_DIVIDES(dim, points.size);
    const size_t num_points = points.size / dim;
    ASSERT_SIZE(likes, num_points * num_probes);
    ASSERT_SIZE(squared_distances, num_points * num_probes);

    ConstructDerivData data = {
        dim,
        num_points,
        tol,
        & likes,
        & squared_distances,
        & probes,
        & points,
        & recons,
        & work};
    ConstructDeriv tasks(data);
    static tbb::task_scheduler_init init;
    tbb::blocked_range<size_t> range(0, num_probes);
    tbb::parallel_reduce(range, tasks);
    return tasks.stats;
}
bool Worker::getTasks() { vector<string> children; int code = zk->getChildren(m_assign_dir, true, &children); //LOG_INFO("got children:%d task num:%d", children.size(), m_tasks.size()); if (code != ZOK) return false; //find deleted task set<string> tasks(children.begin(), children.end()); for (map<string, Task*>::iterator it = m_tasks.begin(); it != m_tasks.end(); ) { if (tasks.find(it->first) == tasks.end()) { //deleted task delete it->second; LOG_INFO("delete task:%s", it->first.c_str()); m_tasks.erase(it++); } else { ++it; } } //find added task for (int i = 0; i < children.size(); ++i) { if (m_tasks.find(children[i]) == m_tasks.end()) { Task *info = getTaskInfo(children[i]); if (info != NULL) { //new task LOG_INFO("add task:%s", children[i].c_str()); m_tasks[children[i]] = info; RunTask(children[i]); } } } return true; }
/// Construct the blastn command-line argument set.  Registers, in display
/// order: program description, task selection, database, standard I/O,
/// search, nucleotide, megablast, filtering, gapping, HSP filtering,
/// window/off-diagonal, megablast-index, query, formatting, multi-thread,
/// remote and debug argument groups.
CBlastnAppArgs::CBlastnAppArgs()
{
    CRef<IBlastCmdLineArgs> arg;
    static const string kProgram("blastn");
    arg.Reset(new CProgramDescriptionArgs(kProgram, "Nucleotide-Nucleotide BLAST"));
    const bool kQueryIsProtein = false;
    m_Args.push_back(arg);
    m_ClientId = kProgram + " " + CBlastVersion().Print();

    // Task selection: every nucleotide-nucleotide task except vecscreen.
    static const string kDefaultTask = "megablast";
    SetTask(kDefaultTask);
    set<string> tasks (CBlastOptionsFactory::GetTasks(CBlastOptionsFactory::eNuclNucl));
    tasks.erase("vecscreen"); // vecscreen has its own program
    arg.Reset(new CTaskCmdLineArgs(tasks, kDefaultTask));
    m_Args.push_back(arg);

    // Database arguments, with database masking enabled.
    m_BlastDbArgs.Reset(new CBlastDatabaseArgs);
    m_BlastDbArgs->SetDatabaseMaskingSupport(true);
    arg.Reset(m_BlastDbArgs);
    m_Args.push_back(arg);

    m_StdCmdLineArgs.Reset(new CStdCmdLineArgs);
    arg.Reset(m_StdCmdLineArgs);
    m_Args.push_back(arg);

    arg.Reset(new CGenericSearchArgs(kQueryIsProtein, false, true));
    m_Args.push_back(arg);

    arg.Reset(new CNuclArgs);
    m_Args.push_back(arg);

    arg.Reset(new CDiscontiguousMegablastArgs);
    m_Args.push_back(arg);

    arg.Reset(new CFilteringArgs(kQueryIsProtein));
    m_Args.push_back(arg);

    arg.Reset(new CGappedArgs);
    m_Args.push_back(arg);

    m_HspFilteringArgs.Reset(new CHspFilteringArgs);
    arg.Reset(m_HspFilteringArgs);
    m_Args.push_back(arg);

    arg.Reset(new CWindowSizeArg);
    m_Args.push_back(arg);

    arg.Reset(new COffDiagonalRangeArg);
    m_Args.push_back(arg);

    arg.Reset(new CMbIndexArgs);
    m_Args.push_back(arg);

    m_QueryOptsArgs.Reset(new CQueryOptionsArgs(kQueryIsProtein));
    arg.Reset(m_QueryOptsArgs);
    m_Args.push_back(arg);

    m_FormattingArgs.Reset(new CFormattingArgs);
    arg.Reset(m_FormattingArgs);
    m_Args.push_back(arg);

    m_MTArgs.Reset(new CMTArgs);
    arg.Reset(m_MTArgs);
    m_Args.push_back(arg);

    m_RemoteArgs.Reset(new CRemoteArgs);
    arg.Reset(m_RemoteArgs);
    m_Args.push_back(arg);

    m_DebugArgs.Reset(new CDebugArgs);
    arg.Reset(m_DebugArgs);
    m_Args.push_back(arg);
}
/// Load every table of this database from its metadata directory
/// (one table_name.sql file per table), optionally in parallel on
/// `thread_pool`.  Comments translated from the original Russian.
void DatabaseOrdinary::loadTables(Context & context, boost::threadpool::pool * thread_pool)
{
    log = &Logger::get("DatabaseOrdinary (" + name + ")");

    using FileNames = std::vector<std::string>;
    FileNames file_names;

    Poco::DirectoryIterator dir_end;
    for (Poco::DirectoryIterator dir_it(path); dir_it != dir_end; ++dir_it)
    {
        /// Skip hidden entries such as the .svn directory and the .gitignore file.
        if (dir_it.name().at(0) == '.')
            continue;

        /// Skip .sql.bak backup files.
        if (endsWith(dir_it.name(), ".sql.bak"))
            continue;

        /// Delete leftover .sql.tmp files.
        if (endsWith(dir_it.name(), ".sql.tmp"))
        {
            LOG_INFO(log, "Removing file " << dir_it->path());
            Poco::File(dir_it->path()).remove();
            continue;
        }

        /// The files we want are named table_name.sql.
        if (endsWith(dir_it.name(), ".sql"))
            file_names.push_back(dir_it.name());
        else
            throw Exception("Incorrect file extension: " + dir_it.name() + " in metadata directory " + path, ErrorCodes::INCORRECT_FILE_NAME);
    }

    /** Tables load faster if they are loaded in sorted (by name) order.
      * Otherwise (at least on ext4) DirectoryIterator enumerates them in an
      * order that matches neither the creation order of the tables nor their
      * layout on disk.
      */
    std::sort(file_names.begin(), file_names.end());

    size_t total_tables = file_names.size();
    LOG_INFO(log, "Total " << total_tables << " tables.");

    String data_path = context.getPath() + "/data/" + escapeForFileName(name) + "/";

    StopwatchWithLock watch;
    size_t tables_processed = 0;

    auto task_function = [&](FileNames::const_iterator begin, FileNames::const_iterator end)
    {
        for (FileNames::const_iterator it = begin; it != end; ++it)
        {
            const String & table = *it;

            /// Progress messages so the long server startup is not silent.
            if (__sync_add_and_fetch(&tables_processed, 1) % PRINT_MESSAGE_EACH_N_TABLES == 0
                || watch.lockTestAndRestart(PRINT_MESSAGE_EACH_N_SECONDS))
            {
                LOG_INFO(log, std::fixed << std::setprecision(2) << tables_processed * 100.0 / total_tables << "%");
                watch.restart();
            }

            loadTable(context, path, *this, name, data_path, table);
        }
    };

    /** packaged_task is used so that exceptions are automatically propagated
      * to the main thread.  Drawback: the exceptions reach the main thread
      * only after all tasks have finished.
      */
    const size_t bunch_size = TABLES_PARALLEL_LOAD_BUNCH_SIZE;
    size_t num_bunches = (total_tables + bunch_size - 1) / bunch_size;

    std::vector<std::packaged_task<void()>> tasks(num_bunches);
    for (size_t i = 0; i < num_bunches; ++i)
    {
        auto begin = file_names.begin() + i * bunch_size;
        auto end = (i + 1 == num_bunches) ? file_names.end() : (file_names.begin() + (i + 1) * bunch_size);

        tasks[i] = std::packaged_task<void()>(std::bind(task_function, begin, end));

        if (thread_pool)
            thread_pool->schedule([i, &tasks]{ tasks[i](); });
        else
            tasks[i]();
    }

    if (thread_pool)
        thread_pool->wait();

    /// Rethrow the first exception captured by any task.
    for (auto & task : tasks)
        task.get_future().get();
}
/// Replicator throughput test: open a replica, promote it to primary,
/// create one state provider, run `concurrentTransactions` concurrent
/// workers that together perform `totalTransactions` transactions, and
/// report transactions/second.  Compiled out unless PERF_TEST is defined.
Awaitable<void> ReplicatorPerfTest::Run(
    __in wstring const & testFolder,
    __in int concurrentTransactions,
    __in int totalTransactions,
    __in Data::Log::LogManager & logManager)
{
#ifndef PERF_TEST
    // Non-perf builds compile the body out; silence unused parameters.
    UNREFERENCED_PARAMETER(testFolder);
    UNREFERENCED_PARAMETER(concurrentTransactions);
    UNREFERENCED_PARAMETER(totalTransactions);
    UNREFERENCED_PARAMETER(logManager);
#else
    Replica::SPtr replica = Replica::Create(
        pId_,
        rId_,
        testFolder,
        logManager,
        underlyingSystem_->PagedAllocator());

    co_await replica->OpenAsync();

    // Promote the replica to primary with a fresh epoch and grant access.
    FABRIC_EPOCH epoch1;
    epoch1.DataLossNumber = 1;
    epoch1.ConfigurationNumber = 1;
    epoch1.Reserved = nullptr;
    co_await replica->ChangeRoleAsync(epoch1, FABRIC_REPLICA_ROLE_PRIMARY);

    replica->SetReadStatus(FABRIC_SERVICE_PARTITION_ACCESS_STATUS_GRANTED);
    replica->SetWriteStatus(FABRIC_SERVICE_PARTITION_ACCESS_STATUS_GRANTED);

    KUri::CSPtr stateProviderName = GetStateProviderName(0);

    // Create the state provider inside its own committed transaction.
    {
        Transaction::SPtr txn;
        replica->TxnReplicator->CreateTransaction(txn);
        KFinally([&] {txn->Dispose(); });

        NTSTATUS status = co_await replica->TxnReplicator->AddAsync(*txn, *stateProviderName, L"ReplicatorPerfTest");
        VERIFY_IS_TRUE(NT_SUCCESS(status));
        co_await txn->CommitAsync();
    }

    {
        // Look the provider back up and verify it is the one we created.
        IStateProvider2::SPtr stateProvider2;
        NTSTATUS status = replica->TxnReplicator->Get(*stateProviderName, stateProvider2);
        VERIFY_IS_TRUE(NT_SUCCESS(status));
        VERIFY_IS_NOT_NULL(stateProvider2);
        VERIFY_ARE_EQUAL(*stateProviderName, stateProvider2->GetName());

        IStore<int, int>::SPtr store = dynamic_cast<IStore<int, int>*>(stateProvider2.RawPtr());

        // Time the concurrent workload; each worker runs an equal share
        // of the total transactions.
        Stopwatch s;
        s.Start();

        KArray<Awaitable<void>> tasks(underlyingSystem_->PagedAllocator(), concurrentTransactions, 0);
        for (int i = 0; i < concurrentTransactions; i++)
        {
            status = tasks.Append(DoWorkOnKey(store, replica, totalTransactions / concurrentTransactions, i));
            KInvariant(NT_SUCCESS(status));
        }

        co_await TaskUtilities<Awaitable<void>>::WhenAll(tasks);

        s.Stop();
        int64 txPerSec = ((totalTransactions * 1000) / s.ElapsedMilliseconds);

        Trace.WriteInfo(
            TraceComponent,
            "{0}: Tx/Sec is {1}",
            prId_->TraceId,
            txPerSec);
    }

    // Demote and shut down the replica.
    replica->SetReadStatus(FABRIC_SERVICE_PARTITION_ACCESS_STATUS_NOT_PRIMARY);
    replica->SetWriteStatus(FABRIC_SERVICE_PARTITION_ACCESS_STATUS_NOT_PRIMARY);
    co_await replica->CloseAsync();
#endif
    co_return;
}
/// One timetabling propagation pass for cumulative scheduling with a
/// (possibly variable) capacity `c` and task array `t`.  Scans the event
/// list chronologically while tracking the still-free capacity `ccur`,
/// excludes/prunes tasks that can no longer fit, and fails when mandatory
/// demand exceeds the capacity.
forceinline ExecStatus timetabling(Space& home, Propagator& p, Cap c, TaskArray<Task>& t)
{
    int ccur = c.max();
    int cmax = ccur;
    int cmin = ccur;

    // Sort tasks by decreasing capacity
    TaskByDecCap<Task> tbdc;
    Support::quicksort(&t[0], t.size(), tbdc);

    Region r(home);

    bool assigned;
    if (Event* e = Event::events(r,t,assigned)) {
        // Set of current but not required tasks
        Support::BitSet<Region> tasks(r,static_cast<unsigned int>(t.size()));

        // Process events, use ccur as the capacity that is still free
        do {
            // Current time
            int time = e->time();

            // Process events for completion of required part
            for ( ; (e->type() == Event::LRT) && (e->time() == time); e++)
                if (t[e->idx()].mandatory()) {
                    tasks.set(static_cast<unsigned int>(e->idx()));
                    ccur += t[e->idx()].c();
                }
            // Process events for completion of task
            for ( ; (e->type() == Event::LCT) && (e->time() == time); e++)
                tasks.clear(static_cast<unsigned int>(e->idx()));
            // Process events for start of task
            for ( ; (e->type() == Event::EST) && (e->time() == time); e++)
                tasks.set(static_cast<unsigned int>(e->idx()));
            // Process events for zero-length task
            for ( ; (e->type() == Event::ZRO) && (e->time() == time); e++) {
                ccur -= t[e->idx()].c();
                if (ccur < cmin) cmin=ccur;
                if (ccur < 0)
                    return ES_FAILED;
                ccur += t[e->idx()].c();
            }

            // norun start time
            int nrstime = time;
            // Process events for start of required part
            for ( ; (e->type() == Event::ERT) && (e->time() == time); e++)
                if (t[e->idx()].mandatory()) {
                    tasks.clear(static_cast<unsigned int>(e->idx()));
                    ccur -= t[e->idx()].c();
                    if (ccur < cmin) cmin=ccur;
                    nrstime = time+1;
                    if (ccur < 0)
                        return ES_FAILED;
                } else if (t[e->idx()].optional() && (t[e->idx()].c() > ccur)) {
                    GECODE_ME_CHECK(t[e->idx()].excluded(home));
                }

            // Exploit that tasks are sorted according to capacity
            for (Iter::Values::BitSet<Support::BitSet<Region> > j(tasks);
                 j() && (t[j.val()].c() > ccur); ++j)
                // Task j cannot run from zltime to next time - 1
                if (t[j.val()].mandatory())
                    GECODE_ME_CHECK(t[j.val()].norun(home, nrstime, e->time() - 1));

        } while (e->type() != Event::END);

        // Tighten the capacity lower bound by the maximal used amount.
        GECODE_ME_CHECK(c.gq(home,cmax-cmin));
    }

    // Subsume when all tasks are assigned; otherwise remain in the queue.
    if (assigned)
        return home.ES_SUBSUMED(p);
    return ES_NOFIX;
}
/// Timetabling propagation with a fixed integer capacity `c`.  Builds the
/// event list inline (start/end of each task and of its required part),
/// then sweeps it chronologically, tracking `c` as the still-free
/// capacity, excluding optional tasks that no longer fit and failing when
/// mandatory demand exceeds the capacity.
ExecStatus basic(Space& home, Propagator& p, int c, TaskArray<Task>& t)
{
    // Sort tasks by decreasing capacity
    TaskByDecCap<Task> tbdc;
    Support::quicksort(&t[0], t.size(), tbdc);

    Region r(home);

    // At most four events per task plus the end marker.
    Event* e = r.alloc<Event>(4*t.size()+1);

    // Initialize events
    bool assigned=true;
    {
        bool required=false;
        int n=0;
        for (int i=t.size(); i--; )
            if (t[i].assigned()) {
                // Only add required part
                if (t[i].pmin() > 0) {
                    required = true;
                    e[n++].init(Event::ERT,t[i].lst(),i);
                    e[n++].init(Event::LRT,t[i].ect(),i);
                } else if (t[i].pmax() == 0) {
                    required = true;
                    e[n++].init(Event::ZRO,t[i].lst(),i);
                }
            } else {
                assigned = false;
                e[n++].init(Event::EST,t[i].est(),i);
                e[n++].init(Event::LCT,t[i].lct(),i);
                // Check whether task has required part
                if (t[i].lst() < t[i].ect()) {
                    required = true;
                    e[n++].init(Event::ERT,t[i].lst(),i);
                    e[n++].init(Event::LRT,t[i].ect(),i);
                }
            }

        // Check whether no task has a required part
        if (!required)
            return assigned ? home.ES_SUBSUMED(p) : ES_FIX;

        // Write end marker
        e[n++].init(Event::END,Int::Limits::infinity,-1);

        // Sort events
        Support::quicksort(e, n);
    }

    // Set of current but not required tasks
    Support::BitSet<Region> tasks(r,static_cast<unsigned int>(t.size()));

    // Process events, use c as the capacity that is still free
    while (e->e != Event::END) {
        // Current time
        int time = e->t;

        // Process events for completion of required part
        for ( ; (e->t == time) && (e->e == Event::LRT); e++)
            if (t[e->i].mandatory()) {
                tasks.set(static_cast<unsigned int>(e->i));
                c += t[e->i].c();
            }
        // Process events for completion of task
        for ( ; (e->t == time) && (e->e == Event::LCT); e++)
            tasks.clear(static_cast<unsigned int>(e->i));
        // Process events for start of task
        for ( ; (e->t == time) && (e->e == Event::EST); e++)
            tasks.set(static_cast<unsigned int>(e->i));
        // Process events for zero-length task
        for ( ; (e->t == time) && (e->e == Event::ZRO); e++)
            if (c < t[e->i].c())
                return ES_FAILED;

        // norun start time for 0-length tasks
        int zltime = time;
        // Process events for start of required part
        for ( ; (e->t == time) && (e->e == Event::ERT); e++)
            if (t[e->i].mandatory()) {
                tasks.clear(static_cast<unsigned int>(e->i));
                c -= t[e->i].c();
                zltime = time+1;
                if (c < 0)
                    return ES_FAILED;
            } else if (t[e->i].optional() && (t[e->i].c() > c)) {
                GECODE_ME_CHECK(t[e->i].excluded(home));
            }

        // Exploit that tasks are sorted according to capacity
        for (Iter::Values::BitSet<Support::BitSet<Region> > j(tasks);
             j() && (t[j.val()].c() > c); ++j)
            // Task j cannot run from time to next time - 1
            if (t[j.val()].mandatory()) {
                if (t[j.val()].pmin() > 0) {
                    GECODE_ME_CHECK(t[j.val()].norun(home, time, e->t - 1));
                } else {
                    GECODE_ME_CHECK(t[j.val()].norun(home, zltime, e->t - 1));
                }
            }
    }

    return assigned ? home.ES_SUBSUMED(p) : ES_NOFIX;
}
/*
 * HOD.x driver: initialize globals and cosmology from the parameter file,
 * optionally set up SHMR mode, compute the derived galaxy quantities
 * (bias, satellite fraction, mean halo mass, BETA), then hand control to
 * the task dispatcher tasks(argc, argv).
 */
int main(int argc, char **argv)
{
    double s1, delta_vir, omega_m, x;
    int i, j;
    FILE *fp;

#ifdef PARALLEL
    /* MPI bring-up: record this process' rank and the task count. */
    printf("STARTING>>>\n");
    fflush(stdout);
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &ThisTask);
    MPI_Comm_size(MPI_COMM_WORLD, &NTask);
    printf("TASK %d reporting for duty.\n",ThisTask);
    fflush(stdout);
#endif

    ARGC = argc;
    ARGV = argv;

    /* Global defaults, set before the parameter file is read. */
    OUTPUT=0;
    HOD.fredc = HOD.freds = 1.0;
    for(i=1; i<=99; ++i)
        HOD.free[i]=0;
    wp.esys=0;
    Work.chi2=0;
    Work.imodel=1;
    USE_ERRORS = 0;
    ITRANS=4;
    HUBBLE=0.7;
    BEST_FIT = 0;
    HOD.M_sat_break = 1.0e14;
    HOD.alpha1 = 1.0;

    if(argc==1)
        endrun("./HOD.x hod.bat_file > output");
    read_parameter_file(argv[1]);

    /* Rescale cosmology-dependent quantities to the requested redshift. */
    if(REDSHIFT>0)
    {
        SIGMA_8 = SIGMA_8*growthfactor(REDSHIFT);
        HUBBLEZ = sqrt(OMEGA_M*pow(1+REDSHIFT,3.0)+1-OMEGA_M);
        OMEGA_Z = OMEGA_M*pow(1+REDSHIFT,3.0)/(OMEGA_M*pow(1+REDSHIFT,3.0)+(1-OMEGA_M));
        fprintf(stdout,"SIGMA_8(Z=%.3f)= %.4f\n",REDSHIFT,SIGMA_8);
        fprintf(stdout,"H(Z=%.3f)/H0= %.4f\n",REDSHIFT,HUBBLEZ);
        HOD.M_min = 0;
        RESET_COSMOLOGY++;
        set_HOD_params();
    }

    /* Output the virial overdensity for reference. */
    if(OUTPUT)
    {
        omega_m=OMEGA_M*pow(1+REDSHIFT,3.0)/(OMEGA_M*pow(1+REDSHIFT,3.0)+(1-OMEGA_M));
        x=omega_m-1;
        delta_vir=(18*PI*PI+82*x-39*x*x)/(1+x);
        printf("DELTA_VIR(Omega_m,z) = %f\n",delta_vir);
    }

    /* Do some initialization if we're doing SHMR */
    if(SHMR_FLAG)
    {
        if(SATELLITE_PARAMETERIZATION)SHMR_PARAMS = 14;
        if(VARIABLE_ALPHA)SHMR_PARAMS += 2;
        if(VARIABLE_EXCLUSION)wpl.a[SHMR_PARAMS+1] = EXCLUSION_RADIUS;
        wpl.ncf = SHMR_PARAMS + VARIABLE_EXCLUSION;
        HOD.pdfs = 100;
        HOD.pdfc = 101;
        wpx.calculate_two_halo = 1;
        input_stellar_mass_bins();
        // if we have input from the prompt, take that
        if(argc>2 && atoi(argv[2])!=999)
        {
            fp = openfile(argv[2]);
            fscanf(fp,"%d %d",&i,&j);
            for(i=1; i<=wpl.ncf; ++i)
                fscanf(fp,"%lf",&wpl.a[i]);
            fclose(fp);
        }
    }
    for(i=1; i<=wpl.ncf; ++i)
        printf("wpl.a[%d]= %e\n",i,wpl.a[i]);

    /* LENSING TESTING FOR ALEXIE */
    if(argc>2)
        IDUM_MCMC=atoi(argv[2]);
    SIGMA_8Z0 = 0.8;
    if(argc>2)
        if(atoi(argv[2])==999)
            test(argc,argv);

    /* If there's no cross-correlation function,
     * set the second number density equal to the first */
    if(!XCORR)
        GALAXY_DENSITY2 = GALAXY_DENSITY;

    /* Initialize the non-linear power spectrum. */
    nonlinear_sigmac(8.0);
    sigmac_interp(1.0E13);
    sigmac_radius_interp(1.0);

    /* Skip the HOD stuff if we're SHMR-ing it: */
    if(SHMR_FLAG)
    {
        if(argc>2 && atoi(argv[2])==999)test(argc, argv);
        if(argc>3 && atoi(argv[3])==999)test(argc, argv);
        goto TASKS;
    }

    /* Get the galaxy bias factor */
    s1=qromo(func_galaxy_bias,log(HOD.M_low),log(HOD.M_max),midpnt);
    GALAXY_BIAS=s1/GALAXY_DENSITY;
    if(OUTPUT)
        fprintf(stdout,"Galaxy Bias bg= %f\n",GALAXY_BIAS);
    fflush(stdout);

    /* Get the galaxy satellite fraction */
    s1=qromo(func_satellite_density,log(HOD.M_low),log(HOD.M_max),midpnt)/ GALAXY_DENSITY;
    if(OUTPUT)
        fprintf(stdout,"fsat %e\n",s1);
    fflush(stdout);

    /* Mean halo mass. */
    if(OUTPUT)
        fprintf(stdout,"M_eff %e\n",number_weighted_halo_mass());
    fflush(stdout);

    /* Set up BETA for wp integration. */
    BETA = pow(OMEGA_M,0.6)/GALAXY_BIAS;
    if(OUTPUT)
        printf("BETA = %f\n",BETA);

    /* Hand control to the task dispatcher selected in the parameter file. */
TASKS:
    tasks(argc,argv);
}
/// Dispatch one CartographerRequest against the server.  Each optional
/// sub-message present in the request triggers the matching server
/// operation and populates the corresponding field of the response.
static protobuf::CartographerResponse handle(
    Server& server,
    protobuf::CartographerRequest& request)
{
    POMAGMA_INFO("Handling request");
    Timer timer;
    protobuf::CartographerResponse response;

    if (request.has_crop()) {
        server.crop(request.crop().headroom());
        response.mutable_crop();
    }
    if (request.has_declare()) {
        for (const auto& name : request.declare().nullary_functions()) {
            server.declare(name);
        }
        response.mutable_declare();
    }
    if (request.has_assume()) {
        // Report the per-category fact counts back to the caller.
        const std::string& facts_in = request.assume().facts_in();
        auto counts = server.assume(facts_in);
        response.mutable_assume()->set_pos_count(counts["pos"]);
        response.mutable_assume()->set_neg_count(counts["neg"]);
        response.mutable_assume()->set_merge_count(counts["merge"]);
        response.mutable_assume()->set_ignored_count(counts["ignored"]);
    }
    if (request.has_infer()) {
        const size_t priority = request.infer().priority();
        const size_t theorem_count = server.infer(priority);
        response.mutable_infer()->set_theorem_count(theorem_count);
    }
    if (request.has_execute()) {
        server.execute(request.execute().program());
        response.mutable_execute();
    }
    if (request.has_aggregate()) {
        server.aggregate(request.aggregate().survey_in());
        response.mutable_aggregate();
    }
    if (request.has_validate()) {
        server.validate();
        response.mutable_validate();
    }
    if (request.has_info()) {
        const auto info = server.info();
        response.mutable_info()->set_item_count(info.item_count);
    }
    if (request.has_dump()) {
        server.dump(request.dump().world_out());
        response.mutable_dump();
    }
    if (request.trim_size() > 0) {
        // Collect all trim tasks, then hand them to the server as one
        // batch; one (empty) response slot is added per task.
        std::vector<Server::TrimTask> tasks(request.trim_size());
        for (int i = 0; i < request.trim_size(); ++i) {
            const auto& task = request.trim(i);
            tasks[i].size = task.size();
            tasks[i].temperature = task.temperature();
            tasks[i].filename = task.filename();
            response.add_trim();
        }
        server.trim(tasks);
    }
    if (request.has_conjecture()) {
        const std::string& diverge_out = request.conjecture().diverge_out();
        const std::string& equal_out = request.conjecture().equal_out();
        const size_t max_count = request.conjecture().max_count();
        auto counts = server.conjecture(diverge_out, equal_out, max_count);
        response.mutable_conjecture()->set_diverge_count(counts["diverge"]);
        response.mutable_conjecture()->set_equal_count(counts["equal"]);
    }
    if (request.has_stop()) {
        server.stop();
        response.mutable_stop();
    }

    POMAGMA_INFO("Handled request in " << timer.elapsed() << " sec");
    return response;
}
/* Driver for fitting HOD models to Distant Red Galaxy (DRG) data.
 *
 * Live part: prints model angular clustering ("ALL"/"RED"/"BLUE" curves,
 * presumably w(theta) from the wtheta() helper -- confirm against its
 * definition elsewhere) for three samples, running tasks() for each, then
 * calls exit(0).
 *
 * NOTE(review): everything after the first exit(0) is dead code -- kept
 * experiments that grid over mass-shift / red-fraction parameters, compare
 * model clustering and luminosity functions against data files, and shell
 * out to external "sham"/"covar3" tools. The function relies heavily on
 * file-scope globals (HOD, Task, GALAXY_DENSITY, NGAL_DRG, MSHIFT, MALL,
 * MSTAR, BOX_SIZE, ARGC/ARGV, RESET_FLAG_*, ERROR_FLAG, USE_COVAR).
 * It never returns; it always terminates the process via exit(0).
 */
void drg_model()
{
  int k,i,n=40,j,i1,i2,ngal;
  double dlogr,r,mass,xk,pnorm,psp,rm,sig,t0,t1,pnorm1,mp,mlo,mhi,dr,
    slo,shi,dsdM,rlo,rhi,dlogm,f1,f2,fac,cvir,rvir,rs,r1,r2,mtot=0,m,
    chi2lf,chi2w,x0,mlow;
  FILE *fp,*fp2;
  float x1,x2,x3,x4;
  /* NOTE(review): local argv is never initialized; the (dead) tasks(2,argv)
   * call below would pass garbage if that code were re-enabled. */
  char aa[1000],fname[1000],**argv;
  float *xx,*yy,*zz,*vx,*vy,*vz;
  float *wdata,*rdata,*edata,*lfdata,*lferr,*lfmag,*wmodel;
  int magcnt[100];
  int nwdata, nlf, j1, ngrid = 10, ingal;
  double tmin, tmax, dtheta, theta, delta_mag, maglo;

  /* FITTING RIK'S DATA */
  // 20 logarithmically spaced angles from 1 to 1000 (units set by wtheta()).
  rlo = log(1.0);
  rhi = log(1000);
  dtheta = (rhi-rlo)/(19);

  // Full sample: print model clustering and run the standard task list.
  for(i=0;i<20;++i)
    {
      theta = exp(rlo+i*dtheta);
      printf("ALL %e %e\n",theta,wtheta(theta));
      fflush(stdout);
    }
  sprintf(Task.root_filename,"all");
  tasks(ARGC,ARGV);
  RESET_FLAG_1H++;
  RESET_FLAG_2H++;

  // RED (DRG) sample: re-parameterize the HOD and repeat.
  GALAXY_DENSITY = 4.9e-4;
  NGAL_DRG = 4.9e-4;
  HOD.pdfc = 10; // set up centrals for RED
  HOD.freds = 0.3; // for RED
  HOD.fredc = 1;
  HOD.mass_shift = zbrent(func_findmshift2,0.0,2.434682,1.0E-4); //spaced in fcen0
  fprintf(stdout,"fsat = %.2f fcen = %.2f mu = %.2f ngal= %e %e\n",HOD.freds,HOD.fredc,
          HOD.mass_shift,GALAXY_DENSITY,qromo(func_galaxy_density,log(HOD.M_low),log(HOD.M_max),midpnt));
  for(i=0;i<20;++i)
    {
      theta = exp(rlo+i*dtheta);
      printf("RED %e %e\n",theta,wtheta(theta));
      fflush(stdout);
    }
  sprintf(Task.root_filename,"red");
  tasks(ARGC,ARGV);
  RESET_FLAG_1H++;
  RESET_FLAG_2H++;

  // BLUE (non-DRG) sample: complement of the red satellite fraction.
  GALAXY_DENSITY = 1.9e-3;
  HOD.pdfc = 11; // set up centrals for BLUE
  HOD.freds = 1 - HOD.freds; // for BLUE
  printf("ngal = %e\n",qromo(func_galaxy_density,log(HOD.M_low),log(HOD.M_max),midpnt));
  for(i=0;i<20;++i)
    {
      theta = exp(rlo+i*dtheta);
      printf("BLUE %e %e\n",theta,wtheta(theta));
      fflush(stdout);
    }
  sprintf(Task.root_filename,"blue");
  tasks(ARGC,ARGV);
  exit(0);

  // NOTE(review): all code below this point is unreachable.

  BOX_SIZE = 160.;
  // Read measured w(theta): angle, value, error per row.
  fp = openfile("wtheta_corrected.data");
  nwdata = filesize(fp);
  wdata = vector(1,nwdata);
  rdata = vector(1,nwdata);
  edata = vector(1,nwdata);
  wmodel = vector(1,nwdata);
  for(i=1;i<=nwdata;++i)
    fscanf(fp,"%f %f %f",&rdata[i],&wdata[i],&edata[i]);
  fclose(fp);

  // Read measured luminosity function: magnitude, LF, error per row.
  fp = openfile("LF_DRG.data");
  nlf = filesize(fp);
  lfmag = vector(1,nlf);
  lfdata = vector(1,nlf);
  lferr = vector(1,nlf);
  for(i=1;i<=nlf;++i)
    fscanf(fp,"%f %f %f",&lfmag[i],&lfdata[i],&lferr[i]);
  fclose(fp);

  // Magnitude binning for the LF histogram below.
  delta_mag = 0.45;
  maglo = -23.86-delta_mag/2;

  MSTAR = mstar();
  tmin = log(1.0);
  tmax = log(1000);
  dtheta = (tmax-tmin)/(19);

  //HOD.M_min *= 1.5;
  //HOD.M_low *= 1.5;

  // get mean mass of centrals (ALL)
  MALL = qromo(func_drg1,log(HOD.M_low),log(HOD.M_max),midpnt)/
    qromo(func_drg1a,log(HOD.M_low),log(HOD.M_max),midpnt);

  //new model
  HOD.mass_shift = 0.5;
  HOD.pdfc = 10;
  //HOD.shift_alpha = 1;

  // Grid of mean mass shifts: ngrid log-spaced steps between mlow and mhi.
  mlow = 1;
  mhi = 2;
  dlogm = log(mhi/mlow)/ngrid;

  //analytic_sham();

  /********************************** */
  // let's do the prediction for the blue galaxies.
  // best-fit model (alpha=1) fit10 is 12 28
  // best-fit model (alpha=1) fit9 is 30 19
  j1 = 12;
  j = 19;

  // set up HOD as DRGs
  // for stepping in mshift
  ERROR_FLAG = 0;
  MSHIFT = exp((j1-0.5)*dlogm)*mlow;
  HOD.fredc = 1;
  HOD.mass_shift = zbrent(func_findmshift,0.0,10.0,1.0E-4);
  HOD.freds = (j-0.5)/ngrid;
  HOD.fredc = zbrent(func_findfredc,0.0,1.0,1.0E-4);

  // for stepping in fsat0
  HOD.fredc = (j1-0.5)/ngrid;
  HOD.freds = (j-0.5)/ngrid;
  HOD.mass_shift = zbrent(func_findmshift2,0.0,1.434682,1.0E-4); //spaced in fcen0

  // 9panel c2 5.9
  //CHI 993 0.978682 0.660204 0.181224 2.119014e-01 8.793237e+00 1.237535 6.664499e-04
  HOD.mass_shift = 0.181;
  HOD.fredc = 0.66;
  HOD.freds = 0.3;//0.979;

  HOD.freds = 1 - HOD.freds; // for NON-DRGs
  HOD.pdfc = 11; // set up centrals as 1-f(DRG)
  //HOD.pdfc = 7; // square-well blues at low-mass end
  //HOD.M_min = 7e11;
  //HOD.M_cen_max = 1.5e12;
  //HOD.M_low = 7e11;
  GALAXY_DENSITY = qromo(func_galaxy_density,log(HOD.M_low),log(HOD.M_max),midpnt);
  fprintf(stdout,"fsat = %.2f fcen = %.2f ngal= %e\n",HOD.freds,HOD.fredc,
          GALAXY_DENSITY);
  RESET_FLAG_1H++;
  RESET_FLAG_2H++;
  for(i=0;i<20;++i)
    {
      theta = exp(tmin+i*dtheta);
      fprintf(stdout,"WTH %e %e\n",theta,wtheta(theta));
    }
  sprintf(Task.root_filename,"BX");
  //tasks(2,argv);
  exit(0);

  /* ****************************************/
  if(ARGC>3)
    ingal = atoi(ARGV[3]);
  else
    ingal = 1;
  NGAL_DRG = 6.5e-4;//*(1+(ingal-5)/2.5*0.13);

  // do an outer loop of the mass shifts
  for(j1=1;j1<=ngrid;++j1)
    {
      // go in steps of mean mass shift from 1 to 2.
      MSHIFT = exp((j1-0.5)*dlogm)*mlow;
      HOD.fredc = 1;
      HOD.mass_shift = zbrent(func_findmshift,0.0,10.0,1.0E-4);
      //HOD.mass_shift = 1.116703; //figure 3

      // doing equally spaced in fcen0
      //HOD.fredc = (j1-0.5)/ngrid;

      for(j=1;j<=ngrid;++j)
        {
          ERROR_FLAG = 0;
          HOD.freds = (j-0.5)/ngrid;
          HOD.fredc = zbrent(func_findfredc,0.0,1.0,1.0E-4);
          if(ERROR_FLAG) HOD.fredc = 1.0;
          ERROR_FLAG = 0;
          /*
          HOD.mass_shift = zbrent(func_findmshift2,0.0,1.434682,1.0E-4); //spaced in fcen0
          if(ERROR_FLAG) {
            chi2lf = chi2w = 1.0e6;
            printf("CHI %d %d %f %f %f %e %e %f\n",j1,j,HOD.freds,HOD.fredc,HOD.mass_shift,chi2lf,chi2w,MSHIFT);
            fflush(stdout);
            continue; }
          */

          // NOTE(review): the blocks below repeatedly overwrite (j1,j) and
          // the HOD parameters with hard-coded best-fit values, so only the
          // last assignment group takes effect; the grid loop is effectively
          // pinned to one model per iteration.
          //best fit model from chains (wth+n only)
          //1.648589e-01 7.732068e-01 9.796977e-01
          MSHIFT = pow(10.0,0.164);
          HOD.freds = 0.7732;
          HOD.fredc = 0.977;
          HOD.mass_shift = zbrent(func_findmshift,0.0,10.0,1.0E-4);

          // third column 7.10
          //CHI 7 10 0.950000 1.000000 0.661607 9.897945e+00 1.109673e+01 1.569168 6.500000e-04 5.905374e-04
          HOD.mass_shift = 0.6616;
          HOD.fredc = 1.00;
          HOD.freds = 0.95;
          j1 = 7;
          j = 10;

          // 9panel c2 5.9
          //CHI 993 0.978682 0.660204 0.181224 2.119014e-01 8.793237e+00 1.237535 6.664499e-04
          j1 = 5;
          j = 9;
          HOD.mass_shift = 0.181;
          HOD.fredc = 0.66;
          HOD.freds = 0.979;

          //for 9panel c1 1.1
          j1 = 1;
          j = 1;
          HOD.mass_shift = 0;
          HOD.fredc = NGAL_DRG/GALAXY_DENSITY;
          HOD.freds = NGAL_DRG/GALAXY_DENSITY;

          //best-fit model without LF
          HOD.mass_shift = 0.48;
          HOD.fredc = 0.99;
          HOD.freds = 0.69;

          //best-fit model with LF (bottom row of 6panel)
          //8.234815e-02 9.011035e-01 6.467542e-01
          j1 = 7;
          j = 10;
          HOD.mass_shift = 1.505979e-01 ;
          HOD.fredc = 6.467542e-01 ;
          HOD.freds = 9.011035e-01 ;

          GALAXY_DENSITY = qromo(func_galaxy_density,log(HOD.M_low),log(HOD.M_max),midpnt);
          fprintf(stdout,"fsat = %.1f fcen = %f ngal= %e\n",HOD.freds,HOD.fredc,
                  GALAXY_DENSITY);
          RESET_FLAG_1H++;
          RESET_FLAG_2H++;

          /*
          for(i=0;i<20;++i)
            {
              theta = exp(tmin+i*dtheta);
              fprintf(stdout,"WTH %e %e\n",theta,wtheta(theta));
            }
          exit(0);
          */

          //populate_simulation();
          //exit(0);

          // get qudari model points
          //fp = openfile("q8m.dat");

          // calculate the chi^2 for the wtheta values.
          chi2w = 0;
          for(i=1;i<=nwdata;++i)
            {
              x0 = wtheta(rdata[i]);
              wmodel[i] = x0;
              //q8 model
              //fscanf(fp,"%f %f",&x1,&wmodel[i]);
              //x0 = wmodel[i];
              printf("XX %e %e %e %e\n",rdata[i],wdata[i],x0,edata[i]);
              chi2w += (wdata[i]-x0)*(wdata[i]-x0)/(edata[i]*edata[i]);
            }
          //fclose(fp);
          fmuh(chi2w);
          // Optionally replace the diagonal chi^2 with the full-covariance one.
          if(USE_COVAR)
            chi2w = chi2wtheta_covar(wdata,wmodel);
          fmuh(chi2w);
          //exit(0);

          // Write this grid point's model w(theta) curve to its own file.
          sprintf(fname,"wth_mshift_%d.%d",j1,j);
          fp = fopen(fname,"w");
          for(i=0;i<20;++i)
            {
              theta = exp(tmin+i*dtheta);
              fprintf(fp,"%e %e\n",theta,wtheta(theta));
            }
          fclose(fp);
          //continue;

          // do the real-space clustering and HOD
          sprintf(Task.root_filename,"mshift_%d.%d",j1,j);
          tasks(2,argv);

          // now make the luminosity function for this model
          //output_drg_lf(j);
          sprintf(fname,"sham ../../SHAM/halosub_0.284 hod_mshift_%d.%d %f %f %f %f > gal_mshift_%d.%d",j1,j,HOD.freds,HOD.fredc,HOD.mass_shift,GALAXY_DENSITY,j1,j);
          //sprintf(fname,"sham ../SHAM_120/halosub_0.3323.dat hod_mshift_%.1f.%d %f %f %f > gal_mshift_%.1f.%d",HOD.mass_shift,j,HOD.freds,HOD.fredc,HOD.mass_shift,HOD.mass_shift,j);
          fprintf(stderr,"[%s]\n",fname);
          system(fname);

          // calculate the clustering of this drg sample
          sprintf(fname,"covar3 0.1 15 12 160 0 160 1 gal_mshift_%d.%d a 0 1 auto > xi.mshift_%d.%d",j1,j,j1,j);
          //fprintf(stderr,"[%s]\n",fname);
          //system(fname);

          // calculate the luminosity function
          sprintf(fname,"gal_mshift_%d.%d",j1,j);
          fp = openfile(fname);
          n = filesize(fp);
          for(i=1;i<=nlf;++i)
            magcnt[i] = 0;
          // Histogram galaxy magnitudes: 10 fields are read per row and the
          // last value read (x1) is binned; one trailing field is then
          // consumed and discarded. (Assumes 11 columns per row -- confirm
          // against the sham output format.)
          for(i=1;i<=n;++i)
            {
              for(k=1;k<=10;++k)
                fscanf(fp,"%f",&x1);// printf("%e\n",x1); }
              k = (x1-maglo)/delta_mag + 1;
              if(k>=1 && k<=nlf)magcnt[k]+=1;
              //printf("%d %d %f %f %f\n",i,k,x1,maglo,delta_mag);
              fscanf(fp,"%f",&x1);
            }
          fclose(fp);

          // calculate the chi^2 for the luminosity function
          chi2lf = 0;
          for(i=1;i<=nlf;++i)
            {
              // NOTE(review): both branches of this if/else are identical;
              // the i==nlf special case appears to be vestigial.
              if(i==nlf)
                x0 = log10(magcnt[i]/pow(BOX_SIZE/0.7,3.0)/delta_mag);
              else
                x0 = log10(magcnt[i]/pow(BOX_SIZE/0.7,3.0)/delta_mag);
              printf("LF %d %d %f %f %f\n",j1,j,x0,lfdata[i],lferr[i]);
              chi2lf += (x0 - lfdata[i])*(x0 - lfdata[i])/(lferr[i]*lferr[i]);
            }
          printf("CHI %d %d %f %f %f %e %e %f %e %e\n",j1,j,HOD.freds,HOD.fredc,HOD.mass_shift,chi2lf,chi2w,MSHIFT,NGAL_DRG,GALAXY_DENSITY);
          fflush(stdout);
          exit(0);
        }
    }
  exit(0);
}