/**
 * Serialize all tuples of the buffer into a JSON array of [timestamp, value]
 * pairs, as expected by the middleware API.
 *
 * Fix: the buffer lock was previously taken and released once per element,
 * while the iterator itself (begin()/end()/increment) ran unlocked. Another
 * thread mutating the buffer between iterations could invalidate 'it'.
 * The lock is now held across the whole traversal.
 *
 * @param buf shared pointer to the reading buffer
 * @return newly allocated json_object array; caller owns the reference
 */
json_object * api_json_tuples(Buffer::Ptr buf) {
	json_object *json_tuples = json_object_new_array();
	Buffer::iterator it;

	print(log_debug, "==> number of tuples: %d", "api", buf->size());

	buf->lock();
	for (it = buf->begin(); it != buf->end(); it++) {
		struct json_object *json_tuple = json_object_new_array();

		// TODO use long int of new json-c version
		// API requires milliseconds => * 1000
		double timestamp = it->tvtod() * 1000;
		double value = it->value();

		json_object_array_add(json_tuple, json_object_new_double(timestamp));
		json_object_array_add(json_tuple, json_object_new_double(value));
		json_object_array_add(json_tuples, json_tuple);
	}
	buf->unlock();

	return json_tuples;
}
DECLARE_EXPORT Calendar::~Calendar()
{
  // Walk the intrusive bucket list and free every bucket. The head pointer
  // is advanced before each delete, so the list stays consistent at all times.
  while (firstBucket)
  {
    CalendarBucket* doomed = firstBucket;
    firstBucket = firstBucket->nextBucket;
    delete doomed;
  }

  // Drop any reference a location keeps to this calendar as its
  // availability calendar.
  for (Location::iterator loc = Location::begin(); loc != Location::end(); ++loc)
  {
    if (loc->getAvailable() == this)
      loc->setAvailable(NULL);
  }

  // Drop minimum/maximum calendar references held by buffers.
  for (Buffer::iterator bf = Buffer::begin(); bf != Buffer::end(); ++bf)
  {
    if (bf->getMaximumCalendar() == this)
      bf->setMaximumCalendar(NULL);
    if (bf->getMinimumCalendar() == this)
      bf->setMinimumCalendar(NULL);
  }

  // Drop maximum calendar references held by resources.
  for (Resource::iterator rsrc = Resource::begin(); rsrc != Resource::end(); ++rsrc)
  {
    if (rsrc->getMaximumCalendar() == this)
      rsrc->setMaximumCalendar(NULL);
  }
}
DECLARE_EXPORT Item::~Item()
{
  // Any buffer still pointing at this item loses that reference.
  for (Buffer::iterator bf = Buffer::begin(); bf != Buffer::end(); ++bf)
  {
    if (bf->getItem() == this)
      bf->setItem(NULL);
  }

  // Any demand still pointing at this item loses that reference too.
  for (Demand::iterator dmd = Demand::begin(); dmd != Demand::end(); ++dmd)
  {
    if (dmd->getItem() == this)
      dmd->setItem(NULL);
  }
}
// Destructor: scrub every dangling reference to this location from the
// global model before the object disappears.
Location::~Location()
{
  // Remove all references from buffers to this location
  for (Buffer::iterator buf = Buffer::begin(); buf != Buffer::end(); ++buf)
    if (buf->getLocation() == this)
      buf->setLocation(nullptr);

  // Remove all references from resources to this location
  for (Resource::iterator res = Resource::begin(); res != Resource::end(); ++res)
    if (res->getLocation() == this)
      res->setLocation(nullptr);

  // Remove all references from operations to this location
  for (Operation::iterator oper = Operation::begin(); oper != Operation::end(); ++oper)
    if (oper->getLocation() == this)
      oper->setLocation(nullptr);

  // Remove all references from demands to this location
  for (Demand::iterator dmd = Demand::begin(); dmd != Demand::end(); ++dmd)
    if (dmd->getLocation() == this)
      dmd->setLocation(nullptr);

  // Remove all item suppliers referencing this location.
  // Deleting an ItemSupplier unlinks it from the supplier's item list, so the
  // iterator must be advanced PAST the doomed element before the delete.
  for (Supplier::iterator sup = Supplier::begin(); sup != Supplier::end(); ++sup)
  {
    for (Supplier::itemlist::const_iterator it = sup->getItems().begin();
      it != sup->getItems().end(); )
    {
      if (it->getLocation() == this)
      {
        const ItemSupplier *itemsup = &*it;
        ++it;   // Advance iterator before the delete
        delete itemsup;
      }
      else
        ++it;
    }
  }

  // The ItemDistribution objects are automatically deleted by the
  // destructor of the Association list class.
}
DECLARE_EXPORT Location::~Location()
{
  // Detach this location from every buffer that still references it.
  for (Buffer::iterator bf = Buffer::begin(); bf != Buffer::end(); ++bf)
  {
    if (bf->getLocation() == this)
      bf->setLocation(NULL);
  }

  // Detach this location from every resource that still references it.
  for (Resource::iterator rsrc = Resource::begin(); rsrc != Resource::end(); ++rsrc)
  {
    if (rsrc->getLocation() == this)
      rsrc->setLocation(NULL);
  }

  // Detach this location from every operation that still references it.
  for (Operation::iterator op = Operation::begin(); op != Operation::end(); ++op)
  {
    if (op->getLocation() == this)
      op->setLocation(NULL);
  }
}
// Run a dedicated planning pass that replenishes safety stock in all buffers
// of this cluster, working level by level from upstream to downstream.
void SolverMRP::SolverMRPdata::solveSafetyStock(SolverMRP* solver)
{
  OperatorDelete cleanup("sweeper", this);
  safety_stock_planning = true;
  if (getLogLevel()>0)
    logger << "Start safety stock replenishment pass " << solver->getConstraints() << endl;

  // Bucket the relevant buffers by their level, so lower (upstream) levels
  // are solved before the levels that depend on them.
  // Only buffers of this cluster with a minimum, a minimum calendar, or of
  // the procurement type participate.
  vector< list<Buffer*> > bufs(HasLevel::getNumberOfLevels() + 1);
  for (Buffer::iterator buf = Buffer::begin(); buf != Buffer::end(); ++buf)
    if (buf->getCluster() == cluster
      && ( buf->getMinimum() || buf->getMinimumCalendar()
        || buf->getType() == *BufferProcure::metadata )
      )
      // Buffers with a negative (unknown) level are put in bucket 0.
      bufs[(buf->getLevel()>=0) ? buf->getLevel() : 0].push_back(&*buf);

  for (vector< list<Buffer*> >::iterator b_list = bufs.begin(); b_list != bufs.end(); ++b_list)
    for (list<Buffer*>::iterator b = b_list->begin(); b != b_list->end(); ++b)
    {
      // Reset the solver state before each buffer. The exact set and order of
      // these assignments is what the buffer solver expects on entry.
      state->curBuffer = NULL;
      // A quantity of -1 is a flag for the buffer solver to solve safety stock.
      state->q_qty = -1.0;
      state->q_date = Date::infinitePast;
      state->a_cost = 0.0;
      state->a_penalty = 0.0;
      planningDemand = NULL;
      state->curDemand = NULL;
      state->motive = *b;
      state->curOwnerOpplan = NULL;
      // Call the buffer solver
      (*b)->solve(*solver, this);
      // Check for excess (procurement buffers manage their own excess)
      if ((*b)->getType() != *BufferProcure::metadata)
        (*b)->solve(cleanup, this);
      // Commit the plan changes made for this buffer.
      CommandManager::commit();
    }

  if (getLogLevel()>0)
    logger << "Finished safety stock replenishment pass" << endl;
  safety_stock_planning = false;
}
/**
 * Move new readings from the shared buffer into the local transmit queue
 * (_values), dropping readings whose millisecond timestamp does not advance
 * past _last_timestamp, then serialize the queue as a JSON array of
 * [timestamp, value] pairs.
 *
 * Fix: the previous version allocated the json_tuples array up front and
 * returned NULL without json_object_put() when no values were queued,
 * leaking one json_object per empty poll. The array is now created only
 * after we know there is data to serialize.
 *
 * @param buf shared pointer to the channel's reading buffer
 * @return newly allocated json_object array (caller owns the reference),
 *         or NULL when there is nothing to transmit
 */
json_object * vz::api::Volkszaehler::api_json_tuples(Buffer::Ptr buf) {
	Buffer::iterator it;

	print(log_debug, "==> number of tuples: %d", channel()->name(), buf->size());
	uint64_t timestamp = 1;

	// copy all values to local buffer queue
	buf->lock();
	for (it = buf->begin(); it != buf->end(); it++) {
		timestamp = round(it->tvtod() * 1000);
		print(log_debug, "compare: %llu %llu %f", channel()->name(), _last_timestamp, timestamp, it->tvtod() * 1000);
		// Only queue strictly newer readings; duplicates/out-of-order are dropped.
		if (_last_timestamp < timestamp) {
			_values.push_back(*it);
			_last_timestamp = timestamp;
		}
		it->mark_delete();
	}
	buf->unlock();
	buf->clean();

	if (_values.size() < 1) {
		// Nothing queued: no JSON object was allocated, nothing to release.
		return NULL;
	}

	json_object *json_tuples = json_object_new_array();
	for (it = _values.begin(); it != _values.end(); it++) {
		struct json_object *json_tuple = json_object_new_array();

		// TODO use long int of new json-c version
		// API requires milliseconds => * 1000
		double timestamp = it->tvtod() * 1000;
		double value = it->value();

		json_object_array_add(json_tuple, json_object_new_double(timestamp));
		json_object_array_add(json_tuple, json_object_new_double(value));
		json_object_array_add(json_tuples, json_tuple);
	}

	return json_tuples;
}
/**
 * Redraw the two-line status bar for the given cursor position.
 *
 * Fixes:
 *  - '_width - line1.length()' underflowed (unsigned) when the status text
 *    was wider than the terminal, requesting a gigantic padding line.
 *  - The percentage computation divided by the file size without guarding
 *    against an empty (zero-byte) file.
 *
 * @param cursor iterator pointing at the currently displayed chunk
 */
void StatusBar::redraw(Buffer::iterator& cursor)
{
	update_terminal_size();

	// Drawing first line: "<name> <offset>/<size> <percent>%"
	std::stringstream ss1;
	ss1 << " " << cursor.string_name();
	u64 size = cursor.get_chunk()->get_file().get_size();
	ss1 << " " << cursor->get_offset() << "/" << size;
	// Guard the division: an empty file is shown as fully viewed.
	u64 percent = size ? (cursor->get_offset() + cursor->get_length()) * 100 / size : 100;
	ss1 << " " << percent << "%";

	Line line1(ss1.str());
	// Clamp padding to zero when the text already fills (or overflows) the row,
	// avoiding unsigned wrap-around in the subtraction.
	Line rest(' ', _width > line1.length() ? _width - line1.length() : 0);
	Log1("Rest: " << rest);
	Log1("Line1: " << line1);
	line1 += rest;
	Log1("Line1: " << line1);
	_brush.draw_line(_text_height, line1, Brush::status_bar_color);

	// Second line is blank filler in the status-bar color.
	Line line2(' ', _width);
	_brush.draw_line(_text_height + 1, line2, Brush::status_bar_color);

	draw_marks(cursor);
}
// Python-exposed diagnostic: walk every entity type in the model, sum its
// reported memory footprint, and print a per-type table plus a grand total
// to the application logger. Returns None to Python.
DECLARE_EXPORT PyObject* printModelSize(PyObject* self, PyObject* args)
{
  // Free Python interpreter for other threads
  Py_BEGIN_ALLOW_THREADS

  // Execute and catch exceptions
  size_t count, memsize;
  try
  {
    // Intro
    logger << endl << "Size information of frePPLe " << PACKAGE_VERSION
        << " (" << __DATE__ << ")" << endl << endl;

    // Print current locale
#if defined(HAVE_SETLOCALE) || defined(_MSC_VER)
    logger << "Locale: " << setlocale(LC_ALL,NULL) << endl << endl;
#else
    logger << endl;
#endif

    // Print loaded modules
    Environment::printModules();

    // Print the number of clusters
    logger << "Clusters: " << HasLevel::getNumberOfClusters()
        << " (hanging: " << HasLevel::getNumberOfHangingClusters() << ")"
        << endl << endl;

    // Header for memory size
    logger << "Memory usage:" << endl;
    logger << "Model \tNumber\tMemory" << endl;
    logger << "----- \t------\t------" << endl;

    // Plan
    size_t total = Plan::instance().getSize();
    logger << "Plan \t1\t"<< Plan::instance().getSize() << endl;

    // Locations
    memsize = 0;
    for (Location::iterator l = Location::begin(); l != Location::end(); ++l)
      memsize += l->getSize();
    logger << "Location \t" << Location::size() << "\t" << memsize << endl;
    total += memsize;

    // Customers
    memsize = 0;
    for (Customer::iterator c = Customer::begin(); c != Customer::end(); ++c)
      memsize += c->getSize();
    logger << "Customer \t" << Customer::size() << "\t" << memsize << endl;
    total += memsize;

    // Buffers
    memsize = 0;
    for (Buffer::iterator b = Buffer::begin(); b != Buffer::end(); ++b)
      memsize += b->getSize();
    logger << "Buffer \t" << Buffer::size() << "\t" << memsize << endl;
    total += memsize;

    // Setup matrices
    memsize = 0;
    for (SetupMatrix::iterator s = SetupMatrix::begin(); s != SetupMatrix::end(); ++s)
      memsize += s->getSize();
    logger << "Setup matrix \t" << SetupMatrix::size() << "\t" << memsize << endl;
    total += memsize;

    // Resources
    memsize = 0;
    for (Resource::iterator r = Resource::begin(); r != Resource::end(); ++r)
      memsize += r->getSize();
    logger << "Resource \t" << Resource::size() << "\t" << memsize << endl;
    total += memsize;

    // Skills and resourceskills (counted together in one pass over the skills)
    size_t countResourceSkills(0), memResourceSkills(0);
    memsize = 0;
    for (Skill::iterator sk = Skill::begin(); sk != Skill::end(); ++sk)
    {
      memsize += sk->getSize();
      for (Skill::resourcelist::const_iterator rs = sk->getResources().begin();
          rs != sk->getResources().end(); ++rs)
      {
        ++countResourceSkills;
        memResourceSkills += rs->getSize();
      }
    }
    logger << "Skill \t" << Skill::size() << "\t" << memsize << endl;
    logger << "ResourceSkill \t" << countResourceSkills << "\t" << memResourceSkills << endl;
    total += memsize;

    // Operations, flows and loads (one pass over the operations)
    size_t countFlows(0), memFlows(0), countLoads(0), memLoads(0);
    memsize = 0;
    for (Operation::iterator o = Operation::begin(); o != Operation::end(); ++o)
    {
      memsize += o->getSize();
      for (Operation::flowlist::const_iterator fl = o->getFlows().begin();
          fl != o->getFlows().end(); ++ fl)
      {
        ++countFlows;
        memFlows += fl->getSize();
      }
      for (Operation::loadlist::const_iterator ld = o->getLoads().begin();
          ld != o->getLoads().end(); ++ ld)
      {
        ++countLoads;
        memLoads += ld->getSize();
      }
    }
    logger << "Operation \t" << Operation::size() << "\t" << memsize << endl;
    logger << "Flow \t" << countFlows << "\t" << memFlows << endl;
    logger << "Load \t" << countLoads << "\t" << memLoads << endl;
    total += memsize + memFlows + memLoads;

    // Calendars (which includes the buckets)
    memsize = 0;
    for (Calendar::iterator cl = Calendar::begin(); cl != Calendar::end(); ++cl)
      memsize += cl->getSize();
    logger << "Calendar \t" << Calendar::size() << "\t" << memsize << endl;
    total += memsize;

    // Items
    memsize = 0;
    for (Item::iterator i = Item::begin(); i != Item::end(); ++i)
      memsize += i->getSize();
    logger << "Item \t" << Item::size() << "\t" << memsize << endl;
    total += memsize;

    // Demands, together with the constraint problems hanging off them
    memsize = 0;
    size_t c_count = 0, c_memsize = 0;
    for (Demand::iterator dm = Demand::begin(); dm != Demand::end(); ++dm)
    {
      memsize += dm->getSize();
      for (Problem::const_iterator cstrnt(dm->getConstraints().begin());
          cstrnt != dm->getConstraints().end(); ++cstrnt)
      {
        ++c_count;
        c_memsize += cstrnt->getSize();
      }
    }
    logger << "Demand \t" << Demand::size() << "\t" << memsize << endl;
    logger << "Constraints \t" << c_count << "\t" << c_memsize << endl;
    total += memsize + c_memsize;

    // Operationplans: sized with sizeof() and counted, while also counting
    // the load- and flow-plans they own for the next two sections.
    size_t countloadplans(0), countflowplans(0);
    memsize = count = 0;
    for (OperationPlan::iterator j = OperationPlan::begin();
        j!=OperationPlan::end(); ++j)
    {
      ++count;
      memsize += sizeof(*j);
      countloadplans += j->sizeLoadPlans();
      countflowplans += j->sizeFlowPlans();
    }
    total += memsize;
    logger << "OperationPlan\t" << count << "\t" << memsize << endl;

    // Flowplans
    memsize = countflowplans * sizeof(FlowPlan);
    total += memsize;
    logger << "FlowPlan \t" << countflowplans << "\t" << memsize << endl;

    // Loadplans
    memsize = countloadplans * sizeof(LoadPlan);
    total += memsize;
    logger << "LoadPlan \t" << countloadplans << "\t" << memsize << endl;

    // Problems
    memsize = count = 0;
    for (Problem::const_iterator pr = Problem::begin(); pr!=Problem::end(); ++pr)
    {
      ++count;
      memsize += pr->getSize();
    }
    total += memsize;
    logger << "Problem \t" << count << "\t" << memsize << endl;

    // TOTAL
    logger << "Total \t\t" << total << endl << endl;
  }
  catch (...)
  {
    Py_BLOCK_THREADS;
    PythonType::evalException();
    return NULL;
  }
  Py_END_ALLOW_THREADS   // Reclaim Python interpreter
  return Py_BuildValue("");
}
// Python-exposed export: dump the current plan (buffer flowplans, demand
// deliveries, resource loadplans, operationplans, problems and demand
// constraints) as tab-separated text to the given file ("plan.out" default).
// Returns None to Python, or raises on failure.
DECLARE_EXPORT PyObject* savePlan(PyObject* self, PyObject* args)
{
  // Pick up arguments
  const char *filename = "plan.out";
  int ok = PyArg_ParseTuple(args, "s:saveplan", &filename);
  if (!ok) return NULL;

  // Free Python interpreter for other threads
  Py_BEGIN_ALLOW_THREADS

  // Execute and catch exceptions
  ofstream textoutput;
  try
  {
    // Open the output file
    textoutput.open(filename, ios::out);

    // Write the buffer summary
    // (type 1 flowplans with a non-zero quantity only)
    for (Buffer::iterator gbuf = Buffer::begin(); gbuf != Buffer::end(); ++gbuf)
    {
      if (!gbuf->getHidden())
        for (Buffer::flowplanlist::const_iterator oo=gbuf->getFlowPlans().begin();
            oo!=gbuf->getFlowPlans().end(); ++oo)
          if (oo->getType() == 1 && oo->getQuantity() != 0.0)
          {
            textoutput << "BUFFER\t" << *gbuf << '\t'
                << oo->getDate() << '\t'
                << oo->getQuantity() << '\t'
                << oo->getOnhand() << endl;
          }
    }

    // Write the demand summary: one line per delivery operationplan
    for (Demand::iterator gdem = Demand::begin(); gdem != Demand::end(); ++gdem)
    {
      if (!gdem->getHidden())
      {
        for (Demand::OperationPlan_list::const_iterator pp = gdem->getDelivery().begin();
            pp != gdem->getDelivery().end(); ++pp)
          textoutput << "DEMAND\t" << (*gdem) << '\t'
              << (*pp)->getDates().getEnd() << '\t'
              << (*pp)->getQuantity() << endl;
      }
    }

    // Write the resource summary
    // (type 1 loadplans with a non-zero quantity only)
    for (Resource::iterator gres = Resource::begin(); gres != Resource::end(); ++gres)
    {
      if (!gres->getHidden())
        for (Resource::loadplanlist::const_iterator qq=gres->getLoadPlans().begin();
            qq!=gres->getLoadPlans().end(); ++qq)
          if (qq->getType() == 1 && qq->getQuantity() != 0.0)
          {
            textoutput << "RESOURCE\t" << *gres << '\t'
                << qq->getDate() << '\t'
                << qq->getQuantity() << '\t'
                << qq->getOnhand() << endl;
          }
    }

    // Write the operationplan summary.
    for (OperationPlan::iterator rr = OperationPlan::begin(); rr != OperationPlan::end(); ++rr)
    {
      // Skip operationplans of hidden operations
      if (rr->getOperation()->getHidden()) continue;
      textoutput << "OPERATION\t" << rr->getOperation() << '\t'
          << rr->getDates().getStart() << '\t'
          << rr->getDates().getEnd() << '\t'
          << rr->getQuantity() << endl;
    }

    // Write the problem summary.
    for (Problem::const_iterator gprob = Problem::begin(); gprob != Problem::end(); ++gprob)
    {
      textoutput << "PROBLEM\t" << gprob->getType().type << '\t'
          << gprob->getDescription() << '\t'
          << gprob->getDates() << endl;
    }

    // Write the constraint summary
    for (Demand::iterator gdem = Demand::begin(); gdem != Demand::end(); ++gdem)
    {
      if (!gdem->getHidden())
      {
        for (Problem::const_iterator i = gdem->getConstraints().begin();
            i != gdem->getConstraints().end(); ++i)
          textoutput << "DEMAND CONSTRAINT\t" << (*gdem) << '\t'
              << i->getDescription() << '\t'
              << i->getDates() << '\t' << endl;
      }
    }

    // Close the output file
    textoutput.close();
  }
  catch (...)
  {
    // Best effort: close the file before propagating the error to Python.
    if (textoutput.is_open())
      textoutput.close();
    Py_BLOCK_THREADS;
    PythonType::evalException();
    return NULL;
  }
  Py_END_ALLOW_THREADS   // Reclaim Python interpreter
  return Py_BuildValue("");
}
// Recompute the level (distance from the demand side) and cluster (connected
// component) of every operation, buffer and resource in the model, using an
// iterative depth-first traversal over the flow/load network.
// A level of -1 means "level not searched"; cluster 0 collects all dangling
// entities that are not connected to anything.
DECLARE_EXPORT void HasLevel::computeLevels()
{
  computationBusy = true;
  // Get exclusive access to this function in a multi-threaded environment.
  static Mutex levelcomputationbusy;
  ScopeMutexLock l(levelcomputationbusy);

  // Another thread may already have computed the levels while this thread was
  // waiting for the lock. In that case the while loop will be skipped.
  while (recomputeLevels)
  {
    // Reset the recomputation flag. Note that during the computation the flag
    // could be switched on again by some model change in a different thread.
    // In that case, the while loop will be rerun.
    recomputeLevels = false;

    // Force creation of all delivery operations
    for (Demand::iterator gdem = Demand::begin(); gdem != Demand::end(); ++gdem)
      gdem->getDeliveryOperation();

    // Reset current levels on buffers, resources and operations.
    // Also force the creation of all producing operations on the buffers.
    size_t numbufs = Buffer::size();
    // Creating the producing operations of the buffers can cause new buffers
    // to be created. We repeat this loop until no new buffers are being added.
    // This isn't the most efficient loop, but it remains cheap and fast...
    while (true)
    {
      for (Buffer::iterator gbuf = Buffer::begin(); gbuf != Buffer::end(); ++gbuf)
      {
        gbuf->cluster = 0;
        gbuf->lvl = -1;
        gbuf->getProducingOperation();
      }
      size_t numbufs_after = Buffer::size();
      if (numbufs == numbufs_after)
        break;
      else
        numbufs = numbufs_after;
    }
    for (Resource::iterator gres = Resource::begin(); gres != Resource::end(); ++gres)
    {
      gres->cluster = 0;
      gres->lvl = -1;
    }
    for (Operation::iterator gop = Operation::begin(); gop != Operation::end(); ++gop)
    {
      gop->cluster = 0;
      gop->lvl = -1;
    }

    // Loop through all operations.
    // The stack holds (operation, level) pairs; a level of -1 means only
    // cluster membership is propagated, not levels.
    stack< pair<Operation*,int> > stack;
    Operation* cur_oper;
    int cur_level;
    Buffer *cur_buf;
    const Flow* cur_Flow;
    bool search_level;
    int cur_cluster;
    numberOfLevels = 0;
    numberOfClusters = 0;
    map<Operation*,short> visited;
    for (Operation::iterator g = Operation::begin(); g != Operation::end(); ++g)
    {
      // Select a new cluster number
      if (g->cluster)
        cur_cluster = g->cluster;
      else
      {
        // Detect hanging operations
        if (g->getFlows().empty() && g->getLoads().empty()
            && g->getSuperOperations().empty()
            && g->getSubOperations().empty() )
        {
          // Cluster 0 keeps all dangling operations
          g->lvl = 0;
          continue;
        }
        cur_cluster = ++numberOfClusters;
        if (numberOfClusters >= UINT_MAX)
          throw LogicException("Too many clusters");
      }

#ifdef CLUSTERDEBUG
      logger << "Investigating operation '" << &*g
          << "' - current cluster " << g->cluster << endl;
#endif

      // Do we need to activate the level search?
      // Criteria are:
      //   - Not used in a super operation
      //   - Have a producing flow on the operation itself
      //     or on any of its sub operations
      search_level = false;
      if (g->getSuperOperations().empty())
      {
        search_level = true;
        // Does the operation itself have producing flows?
        for (Operation::flowlist::const_iterator fl = g->getFlows().begin();
            fl != g->getFlows().end() && search_level; ++fl)
          if (fl->isProducer()) search_level = false;
        if (search_level)
        {
          // Do suboperations have a producing flow?
          for (Operation::Operationlist::const_reverse_iterator
              i = g->getSubOperations().rbegin();
              i != g->getSubOperations().rend() && search_level;
              ++i)
            for (Operation::flowlist::const_iterator
                fl = (*i)->getOperation()->getFlows().begin();
                fl != (*i)->getOperation()->getFlows().end() && search_level;
                ++fl)
              if (fl->isProducer()) search_level = false;
        }
      }

      // If both the level and the cluster are de-activated, then we can move on
      if (!search_level && g->cluster)
        continue;

      // Start recursing
      // Note that as soon as we push an operation on the stack we set its
      // cluster and/or level. This avoids that operations are needlessly
      // pushed a second time on the stack.
      stack.push(make_pair(&*g, search_level ? 0 : -1));
      visited.clear();
      g->cluster = cur_cluster;
      if (search_level) g->lvl = 0;
      while (!stack.empty())
      {
        // Take the top of the stack
        cur_oper = stack.top().first;
        cur_level = stack.top().second;
        stack.pop();

        // Keep track of the maximum number of levels
        if (cur_level > numberOfLevels)
          numberOfLevels = cur_level;

#ifdef CLUSTERDEBUG
        logger << " Recursing in Operation '" << *(cur_oper)
            << "' - current level " << cur_level << endl;
#endif

        // Detect loops in the supply chain
        map<Operation*,short>::iterator detectloop = visited.find(cur_oper);
        if (detectloop == visited.end())
          // Keep track of operations already visited
          visited.insert(make_pair(cur_oper,0));
        else if (++(detectloop->second) > 1)
          // Already visited this operation enough times - don't repeat
          continue;

        // Push sub operations on the stack
        for (Operation::Operationlist::const_reverse_iterator
            i = cur_oper->getSubOperations().rbegin();
            i != cur_oper->getSubOperations().rend();
            ++i)
        {
          if ((*i)->getOperation()->lvl < cur_level)
          {
            // Search level and cluster
            stack.push(make_pair((*i)->getOperation(),cur_level));
            (*i)->getOperation()->lvl = cur_level;
            (*i)->getOperation()->cluster = cur_cluster;
          }
          else if (!(*i)->getOperation()->cluster)
          {
            // Search for clusters information only
            stack.push(make_pair((*i)->getOperation(),-1));
            (*i)->getOperation()->cluster = cur_cluster;
          }
          // else: no search required
        }

        // Push super operations on the stack
        for (list<Operation*>::const_reverse_iterator
            j = cur_oper->getSuperOperations().rbegin();
            j != cur_oper->getSuperOperations().rend();
            ++j)
        {
          if ((*j)->lvl < cur_level)
          {
            // Search level and cluster
            stack.push(make_pair(*j,cur_level));
            (*j)->lvl = cur_level;
            (*j)->cluster = cur_cluster;
          }
          else if (!(*j)->cluster)
          {
            // Search for clusters information only
            stack.push(make_pair(*j,-1));
            (*j)->cluster = cur_cluster;
          }
          // else: no search required
        }

        // Update level of resources linked to current operation
        for (Operation::loadlist::const_iterator gres = cur_oper->getLoads().begin();
            gres != cur_oper->getLoads().end(); ++gres)
        {
          Resource *resptr = gres->getResource();
          // Update the level of the resource
          if (resptr->lvl < cur_level) resptr->lvl = cur_level;
          // Update the cluster of the resource and operations using it
          if (!resptr->cluster)
          {
            resptr->cluster = cur_cluster;
            // Find more operations connected to this cluster by the resource
            for (Resource::loadlist::const_iterator resops = resptr->getLoads().begin();
                resops != resptr->getLoads().end(); ++resops)
              if (!resops->getOperation()->cluster)
              {
                stack.push(make_pair(resops->getOperation(),-1));
                resops->getOperation()->cluster = cur_cluster;
              }
          }
        }

        // Now loop through all flows of the operation
        for (Operation::flowlist::const_iterator gflow = cur_oper->getFlows().begin();
            gflow != cur_oper->getFlows().end(); ++gflow)
        {
          cur_Flow = &*gflow;
          cur_buf = cur_Flow->getBuffer();

          // Check whether the level search needs to continue
          search_level = cur_level!=-1 && cur_buf->lvl<cur_level+1;

          // Check if the buffer needs processing
          if (search_level || !cur_buf->cluster)
          {
            // Update the cluster of the current buffer
            cur_buf->cluster = cur_cluster;

            // Loop through all flows of the buffer
            for (Buffer::flowlist::const_iterator buffl = cur_buf->getFlows().begin();
                buffl != cur_buf->getFlows().end(); ++buffl)
            {
              // Check level recursion: consuming through this buffer pushes
              // its producers one level further upstream.
              if (cur_Flow->isConsumer() && search_level)
              {
                if (buffl->getOperation()->lvl < cur_level+1
                    && &*buffl != cur_Flow
                    && buffl->isProducer())
                {
                  stack.push(make_pair(buffl->getOperation(),cur_level+1));
                  buffl->getOperation()->lvl = cur_level+1;
                  buffl->getOperation()->cluster = cur_cluster;
                }
                else if (!buffl->getOperation()->cluster)
                {
                  stack.push(make_pair(buffl->getOperation(),-1));
                  buffl->getOperation()->cluster = cur_cluster;
                }
                if (cur_level+1 > numberOfLevels)
                  numberOfLevels = cur_level+1;
                cur_buf->lvl = cur_level+1;
              }
              // Check cluster recursion
              else if (!buffl->getOperation()->cluster)
              {
                stack.push(make_pair(buffl->getOperation(),-1));
                buffl->getOperation()->cluster = cur_cluster;
              }
            }
          }  // End of needs-processing if statement
        }  // End of flow loop
      }  // End while stack not empty
    }  // End of Operation loop

    // The above loop will visit ALL operations and recurse through the
    // buffers and resources connected to them.
    // Missing from the loop are buffers and resources that have no flows or
    // loads at all. We catch those poor lonely fellows now...
    for (Buffer::iterator gbuf2 = Buffer::begin(); gbuf2 != Buffer::end(); ++gbuf2)
      if (gbuf2->getFlows().empty()) gbuf2->cluster = 0;
    for (Resource::iterator gres2 = Resource::begin(); gres2 != Resource::end(); ++gres2)
      if (gres2->getLoads().empty()) gres2->cluster = 0;
  }  // End of while recomputeLevels. The loop will be repeated as long as model
     // changes are done during the recomputation.

  // Unlock the exclusive access to this function
  computationBusy = false;
}
// Python entry point: create a purchase-order operationplan for an item at a
// location, optionally restricted to a given supplier. Finds (or creates) the
// destination buffer and a matching item-supplier operation, creating an
// "unauthorized supplier" operation with an attached data problem when no
// authorized lane exists.
extern "C" PyObject* OperationItemSupplier::createOrder(
  PyObject *self, PyObject *args, PyObject *kwdict
  )
{
  // Parse the Python arguments
  PyObject* pylocation = NULL;
  unsigned long id = 0;
  const char* ref = NULL;
  PyObject* pyitem = NULL;
  PyObject* pysupplier = NULL;
  double qty = 0;
  PyObject* pystart = NULL;
  PyObject* pyend = NULL;
  const char* status = NULL;
  const char* source = NULL;
  static const char *kwlist[] = {
    "location", "id", "reference", "item", "supplier", "quantity",
    "start", "end", "status", "source", NULL
    };
  int ok = PyArg_ParseTupleAndKeywords(
    args, kwdict, "|OkzOOdOOzz:createOrder", const_cast<char**>(kwlist),
    &pylocation, &id, &ref, &pyitem, &pysupplier, &qty, &pystart, &pyend,
    &status, &source
    );
  if (!ok) return NULL;
  // NOTE(review): 'id' and 'source' are parsed but never used below (the
  // distribution variant calls setIdentifier(id)) — confirm intended.
  Date start = pystart ? PythonData(pystart).getDate() : Date::infinitePast;
  Date end = pyend ? PythonData(pyend).getDate() : Date::infinitePast;

  // Validate all arguments
  if (!pylocation || !pyitem)
  {
    PyErr_SetString(PythonDataException, "item and location arguments are mandatory");
    return NULL;
  }
  PythonData location_tmp(pylocation);
  if (!location_tmp.check(Location::metadata))
  {
    PyErr_SetString(PythonDataException, "location argument must be of type location");
    return NULL;
  }
  PythonData item_tmp(pyitem);
  if (!item_tmp.check(Item::metadata))
  {
    PyErr_SetString(PythonDataException, "item argument must be of type item");
    return NULL;
  }
  PythonData supplier_tmp(pysupplier);
  if (pysupplier && !supplier_tmp.check(Supplier::metadata))
  {
    PyErr_SetString(PythonDataException, "supplier argument must be of type supplier");
    return NULL;
  }
  Item *item = static_cast<Item*>(item_tmp.getObject());
  Location *location = static_cast<Location*>(location_tmp.getObject());
  Supplier *supplier = pysupplier ? static_cast<Supplier*>(supplier_tmp.getObject()) : NULL;

  // Find or create the destination buffer.
  Buffer* destbuffer = NULL;
  for (Buffer::iterator bufiter = Buffer::begin(); bufiter != Buffer::end(); ++bufiter)
  {
    if (bufiter->getLocation() == location && bufiter->getItem() == item)
    {
      if (destbuffer)
      {
        stringstream o;
        o << "Multiple buffers found for item '" << item << "'' and location'" << location << "'";
        throw DataException(o.str());
      }
      destbuffer = &*bufiter;
    }
  }
  if (!destbuffer)
  {
    // Create the destination buffer
    destbuffer = new BufferDefault();
    stringstream o;
    o << item << " @ " << location;
    destbuffer->setName(o.str());
    destbuffer->setItem(item);
    destbuffer->setLocation(location);
  }

  // Look for a matching supplying operation on this buffer.
  // Here we also trigger the creation of its producing operation, which
  // contains the logic to build possible transfer operations.
  Operation *oper = NULL;
  Operation* prodOper = destbuffer->getProducingOperation();
  if (prodOper && prodOper->getType() == *OperationItemSupplier::metadata)
  {
    if (supplier)
    {
      if (supplier->isMemberOf(static_cast<OperationItemSupplier*>(prodOper)->getItemSupplier()->getSupplier()))
        oper = prodOper;
    }
    else
      oper = prodOper;
  }
  else if (prodOper && prodOper->getType() == *OperationAlternate::metadata)
  {
    SubOperation::iterator soperiter = prodOper->getSubOperationIterator();
    while (SubOperation *soper = soperiter.next())
    {
      // NOTE(review): this branch compares soper->getType() but then casts
      // and assigns prodOper (the alternate parent) rather than
      // soper->getOperation() in the supplier check and in the else branch
      // below — looks like a copy-paste slip; confirm against callers.
      if (soper->getType() == *OperationItemSupplier::metadata)
      {
        if (supplier)
        {
          if (supplier->isMemberOf(static_cast<OperationItemSupplier*>(prodOper)->getItemSupplier()->getSupplier()))
          {
            oper = soper->getOperation();
            break;
          }
        }
        else
        {
          oper = prodOper;
          break;
        }
      }
    }
  }

  // No matching operation is found.
  if (!oper)
  {
    // We'll create one now, but that requires that we have a supplier defined.
    if (!supplier)
      throw DataException("Supplier is needed on this purchase order");
    // Note: We know that we need to create a new one. An existing one would
    // have created an operation on the buffer already.
    ItemSupplier *itemsupplier = new ItemSupplier();
    itemsupplier->setSupplier(supplier);
    itemsupplier->setItem(item);
    itemsupplier->setLocation(location);
    oper = new OperationItemSupplier(itemsupplier, destbuffer);
    // Flag the new lane as a data problem: it wasn't modeled explicitly.
    new ProblemInvalidData(oper, "Purchase orders on unauthorized supplier", "operation",
        Date::infinitePast, Date::infiniteFuture, 1);
  }

  // Finally, create the operationplan
  // NOTE(review): unlike OperationItemDistribution::createOrder, this variant
  // never calls opplan->activate() — confirm whether that is intentional.
  OperationPlan *opplan = oper->createOperationPlan(qty, start, end);
  if (status)
    opplan->setStatus(status);
  if (ref)
    opplan->setReference(ref);

  // Return result
  Py_INCREF(opplan);
  return opplan;
}
// Python entry point: create a distribution-order operationplan moving an
// item from an origin location to a destination location. Finds (or creates)
// both buffers and a matching item-distribution operation, creating an
// "unauthorized lane" operation with an attached data problem when no
// authorized lane exists.
//
// Fix: the duplicate-buffer error raised while searching the ORIGIN buffer
// printed the destination location instead of the origin (copy-paste bug).
extern "C" PyObject* OperationItemDistribution::createOrder(
  PyObject *self, PyObject *args, PyObject *kwdict
  )
{
  // Parse the Python arguments
  PyObject* pydest = NULL;
  unsigned long id = 0;
  const char* ref = NULL;
  PyObject* pyitem = NULL;
  PyObject* pyorigin = NULL;
  double qty = 0;
  PyObject* pystart = NULL;
  PyObject* pyend = NULL;
  int consume = 1;
  const char* status = NULL;
  const char* source = NULL;
  static const char *kwlist[] = {
    "destination", "id", "reference", "item", "origin", "quantity",
    "start", "end", "consume_material", "status", "source", NULL
    };
  int ok = PyArg_ParseTupleAndKeywords(
    args, kwdict, "|OkzOOdOOpzz:createOrder", const_cast<char**>(kwlist),
    &pydest, &id, &ref, &pyitem, &pyorigin, &qty, &pystart, &pyend,
    &consume, &status, &source
    );
  if (!ok) return NULL;
  Date start = pystart ? PythonData(pystart).getDate() : Date::infinitePast;
  Date end = pyend ? PythonData(pyend).getDate() : Date::infinitePast;

  // Validate all arguments
  if (!pydest || !pyitem)
  {
    PyErr_SetString(PythonDataException, "item and destination arguments are mandatory");
    return NULL;
  }
  PythonData dest_tmp(pydest);
  if (!dest_tmp.check(Location::metadata))
  {
    PyErr_SetString(PythonDataException, "destination argument must be of type location");
    return NULL;
  }
  PythonData item_tmp(pyitem);
  if (!item_tmp.check(Item::metadata))
  {
    PyErr_SetString(PythonDataException, "item argument must be of type item");
    return NULL;
  }
  PythonData origin_tmp(pyorigin);
  if (pyorigin && !origin_tmp.check(Location::metadata))
  {
    PyErr_SetString(PythonDataException, "origin argument must be of type location");
    return NULL;
  }
  Item *item = static_cast<Item*>(item_tmp.getObject());
  Location *dest = static_cast<Location*>(dest_tmp.getObject());
  Location *origin = pyorigin ? static_cast<Location*>(origin_tmp.getObject()) : NULL;

  // Find or create the destination buffer.
  Buffer* destbuffer = NULL;
  for (Buffer::iterator bufiter = Buffer::begin(); bufiter != Buffer::end(); ++bufiter)
  {
    if (bufiter->getLocation() == dest && bufiter->getItem() == item)
    {
      if (destbuffer)
      {
        stringstream o;
        o << "Multiple buffers found for item '" << item << "'' and location'" << dest << "'";
        throw DataException(o.str());
      }
      destbuffer = &*bufiter;
    }
  }
  if (!destbuffer)
  {
    // Create the destination buffer
    destbuffer = new BufferDefault();
    stringstream o;
    o << item << " @ " << dest;
    destbuffer->setName(o.str());
    destbuffer->setItem(item);
    destbuffer->setLocation(dest);
  }

  // Build the producing operation for this buffer.
  destbuffer->getProducingOperation();

  // Look for a matching operation replenishing this buffer.
  Operation *oper = NULL;
  for (Buffer::flowlist::const_iterator flowiter = destbuffer->getFlows().begin();
      flowiter != destbuffer->getFlows().end() && !oper; ++flowiter)
  {
    if (flowiter->getOperation()->getType() != *OperationItemDistribution::metadata
        || flowiter->getQuantity() <= 0)
      continue;
    OperationItemDistribution* opitemdist =
      static_cast<OperationItemDistribution*>(flowiter->getOperation());
    if (origin)
    {
      // Origin must match as well: check the consuming flow's buffer location.
      for (Operation::flowlist::const_iterator fl = opitemdist->getFlows().begin();
          fl != opitemdist->getFlows().end(); ++ fl)
      {
        if (fl->getQuantity() < 0 && fl->getBuffer()->getLocation()->isMemberOf(origin))
          oper = opitemdist;
      }
    }
    else
      oper = opitemdist;
  }

  // No matching operation is found.
  if (!oper)
  {
    // We'll create one now, but that requires that we have an origin defined.
    if (!origin)
      throw DataException("Origin location is needed on this distribution order");

    // Find or create the origin buffer.
    Buffer* originbuffer = NULL;
    for (Buffer::iterator bufiter = Buffer::begin(); bufiter != Buffer::end(); ++bufiter)
    {
      if (bufiter->getLocation() == origin && bufiter->getItem() == item)
      {
        if (originbuffer)
        {
          stringstream o;
          // Bug fix: report the origin location here, not the destination.
          o << "Multiple buffers found for item '" << item << "'' and location'" << origin << "'";
          throw DataException(o.str());
        }
        originbuffer = &*bufiter;
      }
    }
    if (!originbuffer)
    {
      // Create the origin buffer
      originbuffer = new BufferDefault();
      stringstream o;
      o << item << " @ " << origin;
      originbuffer->setName(o.str());
      originbuffer->setItem(item);
      originbuffer->setLocation(origin);
    }

    // Note: We know that we need to create a new one. An existing one would
    // have created an operation on the buffer already.
    ItemDistribution *itemdist = new ItemDistribution();
    itemdist->setOrigin(origin);
    itemdist->setItem(item);
    itemdist->setDestination(dest);
    oper = new OperationItemDistribution(itemdist, originbuffer, destbuffer);
    // Flag the new lane as a data problem: it wasn't modeled explicitly.
    new ProblemInvalidData(oper, "Distribution orders on unauthorized lanes", "operation",
        Date::infinitePast, Date::infiniteFuture, 1);
  }

  // Finally, create the operationplan
  OperationPlan *opplan = oper->createOperationPlan(qty, start, end, NULL, NULL, 0, false);
  if (id)
    opplan->setIdentifier(id);
  if (status)
    opplan->setStatus(status);
  if (ref)
    opplan->setReference(ref);
  if (!consume)
    opplan->setConsumeMaterial(false);
  opplan->activate();

  // Return result
  Py_INCREF(opplan);
  return opplan;
}