DECLARE_EXPORT void Plan::setCurrent(Date l)
{
  // Store the new current date of the plan.
  cur_Date = l;

  // Flag every operation as changed, so its operationplans re-evaluate
  // the ProblemBeforeCurrent and ProblemBeforeFence conditions against
  // the updated date.
  for (Operation::iterator oper = Operation::begin();
      oper != Operation::end(); ++oper)
    oper->setChanged();
}
DECLARE_EXPORT PyObject* eraseModel(PyObject* self, PyObject* args)
{
  // Parse the single, optional argument.
  PyObject *flagobj = NULL;
  if (!PyArg_ParseTuple(args, "|O:erase", &flagobj))
    return NULL;

  // Interpret the argument: when true the static model is erased as well,
  // otherwise only the dynamic plan data is removed.
  bool deleteStaticModel = flagobj ? PythonData(flagobj).getBool() : false;

  // Release the Python interpreter while the C++ model is being modified.
  Py_BEGIN_ALLOW_THREADS
  try
  {
    if (deleteStaticModel)
    {
      // Wipe the complete model.
      // The order is chosen to minimize the work of the individual
      // destructors. E.g. the destructor of the item class recurses over
      // all demands and all buffers. It is much faster if there are none
      // already.
      Operation::clear();
      Demand::clear();
      Buffer::clear();
      Resource::clear();
      SetupMatrix::clear();
      Location::clear();
      Customer::clear();
      Calendar::clear();
      Supplier::clear();
      Item::clear();
      Plan::instance().setName("");
      Plan::instance().setDescription("");
      // The setup operation is a static singleton and should always be around
      OperationSetup::setupoperation = new OperationSetup();
      OperationSetup::setupoperation->setName("setup operation");
    }
    else
    {
      // Keep the static model, but delete all operationplans.
      for (Operation::iterator oper = Operation::begin();
          oper != Operation::end(); ++oper)
        oper->deleteOperationPlans();
    }
  }
  catch (...)
  {
    Py_BLOCK_THREADS;
    PythonType::evalException();
    return NULL;
  }
  Py_END_ALLOW_THREADS   // Reclaim Python interpreter
  return Py_BuildValue("");
}
DECLARE_EXPORT Skill::~Skill()
{
  // The ResourceSkill objects are automatically deleted by the destructor
  // of the Association list class.

  // Unlink this skill from every load that still references it.
  for (Operation::iterator oper = Operation::begin();
      oper != Operation::end(); ++oper)
    for (Operation::loadlist::const_iterator ld = oper->getLoads().begin();
        ld != oper->getLoads().end(); ++ld)
      if (ld->getSkill() == this)
        const_cast<Load&>(*ld).setSkill(NULL);
}
Location::~Location()
{
  // Clear the location reference on all buffers pointing here.
  for (Buffer::iterator b = Buffer::begin(); b != Buffer::end(); ++b)
    if (b->getLocation() == this)
      b->setLocation(nullptr);

  // Clear the location reference on all resources pointing here.
  for (Resource::iterator r = Resource::begin(); r != Resource::end(); ++r)
    if (r->getLocation() == this)
      r->setLocation(nullptr);

  // Clear the location reference on all operations pointing here.
  for (Operation::iterator o = Operation::begin(); o != Operation::end(); ++o)
    if (o->getLocation() == this)
      o->setLocation(nullptr);

  // Clear the location reference on all demands pointing here.
  for (Demand::iterator d = Demand::begin(); d != Demand::end(); ++d)
    if (d->getLocation() == this)
      d->setLocation(nullptr);

  // Delete every ItemSupplier that refers to this location.
  for (Supplier::iterator s = Supplier::begin(); s != Supplier::end(); ++s)
  {
    Supplier::itemlist::const_iterator it = s->getItems().begin();
    while (it != s->getItems().end())
    {
      if (it->getLocation() == this)
      {
        // Move the iterator past the element BEFORE deleting it: the
        // delete unlinks the object from the association list, which
        // would otherwise invalidate the iterator we are holding.
        const ItemSupplier *victim = &*it;
        ++it;
        delete victim;
      }
      else
        ++it;
    }
  }

  // The ItemDistribution objects are automatically deleted by the
  // destructor of the Association list class.
}
DECLARE_EXPORT Location::~Location()
{
  // Destructor: scrub every dangling reference to this location before
  // the object disappears, so no other entity is left holding a stale
  // pointer.
  // Modernized to use nullptr (as the other Location destructor in this
  // file already does) instead of the NULL macro.

  // Remove all references from buffers to this location
  for (Buffer::iterator buf = Buffer::begin(); buf != Buffer::end(); ++buf)
    if (buf->getLocation() == this)
      buf->setLocation(nullptr);

  // Remove all references from resources to this location
  for (Resource::iterator res = Resource::begin(); res != Resource::end(); ++res)
    if (res->getLocation() == this)
      res->setLocation(nullptr);

  // Remove all references from operations to this location
  for (Operation::iterator oper = Operation::begin(); oper != Operation::end(); ++oper)
    if (oper->getLocation() == this)
      oper->setLocation(nullptr);
}
void Load::writer(const MetaCategory* c, Serializer* o) { bool firstload = true; for (Operation::iterator i = Operation::begin(); i != Operation::end(); ++i) for (Operation::loadlist::const_iterator j = i->getLoads().begin(); j != i->getLoads().end(); ++j) { if (firstload) { o->BeginList(Tags::loads); firstload = false; } // We use the FULL mode, to force the loads being written regardless // of the depth in the XML tree. o->writeElement(Tags::load, &*j, FULL); } if (!firstload) o->EndList(Tags::loads); }
DECLARE_EXPORT PyObject* printModelSize(PyObject* self, PyObject* args)
{
  // Prints a report to the logger listing, per entity type, the number of
  // objects in the model and their memory footprint, followed by a grand
  // total. Returns None on success; returns NULL with a Python exception
  // set if anything throws.

  // Free Python interpreter for other threads
  Py_BEGIN_ALLOW_THREADS

  // Execute and catch exceptions
  size_t count, memsize;
  try
  {
    // Intro
    logger << endl << "Size information of frePPLe " << PACKAGE_VERSION
        << " (" << __DATE__ << ")" << endl << endl;

    // Print current locale
#if defined(HAVE_SETLOCALE) || defined(_MSC_VER)
    logger << "Locale: " << setlocale(LC_ALL,NULL) << endl << endl;
#else
    logger << endl;
#endif

    // Print loaded modules
    Environment::printModules();

    // Print the number of clusters
    logger << "Clusters: " << HasLevel::getNumberOfClusters()
        << " (hanging: " << HasLevel::getNumberOfHangingClusters() << ")"
        << endl << endl;

    // Header for memory size
    logger << "Memory usage:" << endl;
    logger << "Model \tNumber\tMemory" << endl;
    logger << "----- \t------\t------" << endl;

    // Plan
    size_t total = Plan::instance().getSize();
    logger << "Plan \t1\t"<< Plan::instance().getSize() << endl;

    // Locations
    memsize = 0;
    for (Location::iterator l = Location::begin(); l != Location::end(); ++l)
      memsize += l->getSize();
    logger << "Location \t" << Location::size() << "\t" << memsize << endl;
    total += memsize;

    // Customers
    memsize = 0;
    for (Customer::iterator c = Customer::begin(); c != Customer::end(); ++c)
      memsize += c->getSize();
    logger << "Customer \t" << Customer::size() << "\t" << memsize << endl;
    total += memsize;

    // Buffers
    memsize = 0;
    for (Buffer::iterator b = Buffer::begin(); b != Buffer::end(); ++b)
      memsize += b->getSize();
    logger << "Buffer \t" << Buffer::size() << "\t" << memsize << endl;
    total += memsize;

    // Setup matrices
    memsize = 0;
    for (SetupMatrix::iterator s = SetupMatrix::begin(); s != SetupMatrix::end(); ++s)
      memsize += s->getSize();
    logger << "Setup matrix \t" << SetupMatrix::size() << "\t" << memsize << endl;
    total += memsize;

    // Resources
    memsize = 0;
    for (Resource::iterator r = Resource::begin(); r != Resource::end(); ++r)
      memsize += r->getSize();
    logger << "Resource \t" << Resource::size() << "\t" << memsize << endl;
    total += memsize;

    // Skills and resourceskills
    // The ResourceSkill associations are counted and sized while scanning
    // the skills that own them; their memory is reported separately and
    // intentionally NOT added to the total (only the skill memory is).
    size_t countResourceSkills(0), memResourceSkills(0);
    memsize = 0;
    for (Skill::iterator sk = Skill::begin(); sk != Skill::end(); ++sk)
    {
      memsize += sk->getSize();
      for (Skill::resourcelist::const_iterator rs = sk->getResources().begin();
          rs != sk->getResources().end(); ++rs)
      {
        ++countResourceSkills;
        memResourceSkills += rs->getSize();
      }
    }
    logger << "Skill \t" << Skill::size() << "\t" << memsize << endl;
    logger << "ResourceSkill \t" << countResourceSkills << "\t" << memResourceSkills << endl;
    total += memsize;

    // Operations, flows and loads
    size_t countFlows(0), memFlows(0), countLoads(0), memLoads(0);
    memsize = 0;
    for (Operation::iterator o = Operation::begin(); o != Operation::end(); ++o)
    {
      memsize += o->getSize();
      for (Operation::flowlist::const_iterator fl = o->getFlows().begin();
          fl != o->getFlows().end(); ++ fl)
      {
        ++countFlows;
        memFlows += fl->getSize();
      }
      for (Operation::loadlist::const_iterator ld = o->getLoads().begin();
          ld != o->getLoads().end(); ++ ld)
      {
        ++countLoads;
        memLoads += ld->getSize();
      }
    }
    logger << "Operation \t" << Operation::size() << "\t" << memsize << endl;
    logger << "Flow \t" << countFlows << "\t" << memFlows << endl;
    logger << "Load \t" << countLoads << "\t" << memLoads << endl;
    total += memsize + memFlows + memLoads;

    // Calendars (which includes the buckets)
    memsize = 0;
    for (Calendar::iterator cl = Calendar::begin(); cl != Calendar::end(); ++cl)
      memsize += cl->getSize();
    logger << "Calendar \t" << Calendar::size() << "\t" << memsize << endl;
    total += memsize;

    // Items
    memsize = 0;
    for (Item::iterator i = Item::begin(); i != Item::end(); ++i)
      memsize += i->getSize();
    logger << "Item \t" << Item::size() << "\t" << memsize << endl;
    total += memsize;

    // Demands
    // Each demand's constraint problems are counted and sized as well.
    memsize = 0;
    size_t c_count = 0, c_memsize = 0;
    for (Demand::iterator dm = Demand::begin(); dm != Demand::end(); ++dm)
    {
      memsize += dm->getSize();
      for (Problem::const_iterator cstrnt(dm->getConstraints().begin());
          cstrnt != dm->getConstraints().end(); ++cstrnt)
      {
        ++c_count;
        c_memsize += cstrnt->getSize();
      }
    }
    logger << "Demand \t" << Demand::size() << "\t" << memsize << endl;
    logger << "Constraints \t" << c_count << "\t" << c_memsize << endl;
    total += memsize + c_memsize;

    // Operationplans
    // The loadplans and flowplans are only counted here; their memory is
    // estimated further below from the count and the class size.
    size_t countloadplans(0), countflowplans(0);
    memsize = count = 0;
    for (OperationPlan::iterator j = OperationPlan::begin();
        j!=OperationPlan::end(); ++j)
    {
      ++count;
      memsize += sizeof(*j);
      countloadplans += j->sizeLoadPlans();
      countflowplans += j->sizeFlowPlans();
    }
    total += memsize;
    logger << "OperationPlan\t" << count << "\t" << memsize << endl;

    // Flowplans
    memsize = countflowplans * sizeof(FlowPlan);
    total += memsize;
    logger << "FlowPlan \t" << countflowplans << "\t" << memsize << endl;

    // Loadplans
    memsize = countloadplans * sizeof(LoadPlan);
    total += memsize;
    logger << "LoadPlan \t" << countloadplans << "\t" << memsize << endl;

    // Problems
    memsize = count = 0;
    for (Problem::const_iterator pr = Problem::begin(); pr!=Problem::end(); ++pr)
    {
      ++count;
      memsize += pr->getSize();
    }
    total += memsize;
    logger << "Problem \t" << count << "\t" << memsize << endl;

    // TOTAL
    logger << "Total \t\t" << total << endl << endl;
  }
  catch (...)
  {
    Py_BLOCK_THREADS;
    PythonType::evalException();
    return NULL;
  }
  Py_END_ALLOW_THREADS   // Reclaim Python interpreter
  return Py_BuildValue("");
}
DECLARE_EXPORT void HasLevel::computeLevels()
{
  // Recomputes the level and the cluster of every operation, buffer and
  // resource in the model, using an iterative (explicit-stack) traversal
  // of the supply chain starting from each operation.
  // NOTE(review): computationBusy is set before the mutex is acquired and
  // cleared after the work is done — confirm readers of this flag tolerate
  // it being true while a thread merely waits for the lock.
  computationBusy = true;
  // Get exclusive access to this function in a multi-threaded environment.
  static Mutex levelcomputationbusy;
  ScopeMutexLock l(levelcomputationbusy);

  // Another thread may already have computed the levels while this thread was
  // waiting for the lock. In that case the while loop will be skipped.
  while (recomputeLevels)
  {
    // Reset the recomputation flag. Note that during the computation the flag
    // could be switched on again by some model change in a different thread.
    // In that case, the while loop will be rerun.
    recomputeLevels = false;

    // Force creation of all delivery operations.
    for (Demand::iterator gdem = Demand::begin();
        gdem != Demand::end(); ++gdem)
      gdem->getDeliveryOperation();

    // Reset current levels on buffers, resources and operations.
    // Also force the creation of all producing operations on the buffers.
    size_t numbufs = Buffer::size();
    // Creating the producing operations of the buffers can cause new buffers
    // to be created. We repeat this loop until no new buffers are being added.
    // This isn't the most efficient loop, but it remains cheap and fast...
    while (true)
    {
      for (Buffer::iterator gbuf = Buffer::begin();
          gbuf != Buffer::end(); ++gbuf)
      {
        gbuf->cluster = 0;
        gbuf->lvl = -1;
        gbuf->getProducingOperation();
      }
      size_t numbufs_after = Buffer::size();
      if (numbufs == numbufs_after)
        break;
      else
        numbufs = numbufs_after;
    }
    for (Resource::iterator gres = Resource::begin();
        gres != Resource::end(); ++gres)
    {
      gres->cluster = 0;
      gres->lvl = -1;
    }
    for (Operation::iterator gop = Operation::begin();
        gop != Operation::end(); ++gop)
    {
      gop->cluster = 0;
      gop->lvl = -1;
    }

    // Loop through all operations
    stack< pair<Operation*,int> > stack;
    Operation* cur_oper;
    int cur_level;
    Buffer *cur_buf;
    const Flow* cur_Flow;
    bool search_level;
    int cur_cluster;
    numberOfLevels = 0;
    numberOfClusters = 0;
    // Per-traversal counter of how often each operation was popped, used
    // below to break out of loops in the supply chain.
    map<Operation*,short> visited;
    for (Operation::iterator g = Operation::begin();
        g != Operation::end(); ++g)
    {
      // Select a new cluster number
      if (g->cluster)
        cur_cluster = g->cluster;
      else
      {
        // Detect hanging operations
        if (g->getFlows().empty() && g->getLoads().empty()
            && g->getSuperOperations().empty()
            && g->getSubOperations().empty())
        {
          // Cluster 0 keeps all dangling operations
          g->lvl = 0;
          continue;
        }
        cur_cluster = ++numberOfClusters;
        if (numberOfClusters >= UINT_MAX)
          throw LogicException("Too many clusters");
      }
#ifdef CLUSTERDEBUG
      logger << "Investigating operation '" << &*g
          << "' - current cluster " << g->cluster << endl;
#endif

      // Do we need to activate the level search?
      // Criterion are:
      //  - Not used in a super operation
      //  - Have a producing flow on the operation itself
      //    or on any of its sub operations
      search_level = false;
      if (g->getSuperOperations().empty())
      {
        search_level = true;
        // Does the operation itself have producing flows?
        for (Operation::flowlist::const_iterator fl = g->getFlows().begin();
            fl != g->getFlows().end() && search_level; ++fl)
          if (fl->isProducer())
            search_level = false;
        if (search_level)
        {
          // Do suboperations have a producing flow?
          for (Operation::Operationlist::const_reverse_iterator
              i = g->getSubOperations().rbegin();
              i != g->getSubOperations().rend() && search_level;
              ++i)
            for (Operation::flowlist::const_iterator
                fl = (*i)->getOperation()->getFlows().begin();
                fl != (*i)->getOperation()->getFlows().end() && search_level;
                ++fl)
              if (fl->isProducer())
                search_level = false;
        }
      }

      // If both the level and the cluster are de-activated, then we can move on
      if (!search_level && g->cluster)
        continue;

      // Start recursing
      // Note that as soon as push an operation on the stack we set its
      // cluster and/or level. This is avoid that operations are needlessly
      // pushed a second time on the stack.
      stack.push(make_pair(&*g, search_level ? 0 : -1));
      visited.clear();
      g->cluster = cur_cluster;
      if (search_level)
        g->lvl = 0;
      while (!stack.empty())
      {
        // Take the top of the stack
        cur_oper = stack.top().first;
        cur_level = stack.top().second;
        stack.pop();

        // Keep track of the maximum number of levels
        if (cur_level > numberOfLevels)
          numberOfLevels = cur_level;

#ifdef CLUSTERDEBUG
        logger << " Recursing in Operation '" << *(cur_oper)
            << "' - current level " << cur_level << endl;
#endif

        // Detect loops in the supply chain
        map<Operation*,short>::iterator detectloop = visited.find(cur_oper);
        if (detectloop == visited.end())
          // Keep track of operations already visited
          visited.insert(make_pair(cur_oper,0));
        else if (++(detectloop->second) > 1)
          // Already visited this operation enough times - don't repeat
          continue;

        // Push sub operations on the stack
        for (Operation::Operationlist::const_reverse_iterator
            i = cur_oper->getSubOperations().rbegin();
            i != cur_oper->getSubOperations().rend();
            ++i)
        {
          if ((*i)->getOperation()->lvl < cur_level)
          {
            // Search level and cluster
            stack.push(make_pair((*i)->getOperation(),cur_level));
            (*i)->getOperation()->lvl = cur_level;
            (*i)->getOperation()->cluster = cur_cluster;
          }
          else if (!(*i)->getOperation()->cluster)
          {
            // Search for clusters information only
            stack.push(make_pair((*i)->getOperation(),-1));
            (*i)->getOperation()->cluster = cur_cluster;
          }
          // else: no search required
        }

        // Push super operations on the stack
        for (list<Operation*>::const_reverse_iterator
            j = cur_oper->getSuperOperations().rbegin();
            j != cur_oper->getSuperOperations().rend();
            ++j)
        {
          if ((*j)->lvl < cur_level)
          {
            // Search level and cluster
            stack.push(make_pair(*j,cur_level));
            (*j)->lvl = cur_level;
            (*j)->cluster = cur_cluster;
          }
          else if (!(*j)->cluster)
          {
            // Search for clusters information only
            stack.push(make_pair(*j,-1));
            (*j)->cluster = cur_cluster;
          }
          // else: no search required
        }

        // Update level of resources linked to current operation
        for (Operation::loadlist::const_iterator
            gres = cur_oper->getLoads().begin();
            gres != cur_oper->getLoads().end(); ++gres)
        {
          Resource *resptr = gres->getResource();
          // Update the level of the resource
          if (resptr->lvl < cur_level)
            resptr->lvl = cur_level;
          // Update the cluster of the resource and operations using it
          if (!resptr->cluster)
          {
            resptr->cluster = cur_cluster;
            // Find more operations connected to this cluster by the resource
            for (Resource::loadlist::const_iterator
                resops = resptr->getLoads().begin();
                resops != resptr->getLoads().end(); ++resops)
              if (!resops->getOperation()->cluster)
              {
                stack.push(make_pair(resops->getOperation(),-1));
                resops->getOperation()->cluster = cur_cluster;
              }
          }
        }

        // Now loop through all flows of the operation
        for (Operation::flowlist::const_iterator
            gflow = cur_oper->getFlows().begin();
            gflow != cur_oper->getFlows().end(); ++gflow)
        {
          cur_Flow = &*gflow;
          cur_buf = cur_Flow->getBuffer();
          // Check whether the level search needs to continue
          search_level = cur_level!=-1 && cur_buf->lvl<cur_level+1;
          // Check if the buffer needs processing
          if (search_level || !cur_buf->cluster)
          {
            // Update the cluster of the current buffer
            cur_buf->cluster = cur_cluster;
            // Loop through all flows of the buffer
            for (Buffer::flowlist::const_iterator
                buffl = cur_buf->getFlows().begin();
                buffl != cur_buf->getFlows().end(); ++buffl)
            {
              // Check level recursion
              if (cur_Flow->isConsumer() && search_level)
              {
                if (buffl->getOperation()->lvl < cur_level+1
                    && &*buffl != cur_Flow && buffl->isProducer())
                {
                  stack.push(make_pair(buffl->getOperation(),cur_level+1));
                  buffl->getOperation()->lvl = cur_level+1;
                  buffl->getOperation()->cluster = cur_cluster;
                }
                else if (!buffl->getOperation()->cluster)
                {
                  stack.push(make_pair(buffl->getOperation(),-1));
                  buffl->getOperation()->cluster = cur_cluster;
                }
                if (cur_level+1 > numberOfLevels)
                  numberOfLevels = cur_level+1;
                cur_buf->lvl = cur_level+1;
              }
              // Check cluster recursion
              else if (!buffl->getOperation()->cluster)
              {
                stack.push(make_pair(buffl->getOperation(),-1));
                buffl->getOperation()->cluster = cur_cluster;
              }
            }
          } // End of needs-processing if statement
        } // End of flow loop
      } // End while stack not empty
    } // End of Operation loop

    // The above loop will visit ALL operations and recurse through the
    // buffers and resources connected to them.
    // Missing from the loop are buffers and resources that have no flows or
    // loads at all. We catch those poor lonely fellows now...
    for (Buffer::iterator gbuf2 = Buffer::begin();
        gbuf2 != Buffer::end(); ++gbuf2)
      if (gbuf2->getFlows().empty())
        gbuf2->cluster = 0;
    for (Resource::iterator gres2 = Resource::begin();
        gres2 != Resource::end(); ++gres2)
      if (gres2->getLoads().empty())
        gres2->cluster = 0;

  } // End of while recomputeLevels. The loop will be repeated as long as model
  // changes are done during the recomputation.

  // Unlock the exclusive access to this function
  computationBusy = false;
}