extern "C" PyObject* OperationItemSupplier::createOrder(
  PyObject *self, PyObject *args, PyObject *kwdict
  )
{
  // Python entry point to create a purchase order operationplan.
  // Keyword arguments: location, id, reference, item, supplier, quantity,
  // start, end, status, source. The item and location arguments are
  // mandatory; all others are optional.
  // Returns a new reference to the created operationplan, or NULL with a
  // Python exception set.

  // Parse the Python arguments
  PyObject* pylocation = NULL;
  unsigned long id = 0;
  const char* ref = NULL;
  PyObject* pyitem = NULL;
  PyObject* pysupplier = NULL;
  double qty = 0;
  PyObject* pystart = NULL;
  PyObject* pyend = NULL;
  const char* status = NULL;
  const char* source = NULL;   // Parsed but currently unused
  static const char *kwlist[] = {
    "location", "id", "reference", "item", "supplier", "quantity",
    "start", "end", "status", "source", NULL
    };
  int ok = PyArg_ParseTupleAndKeywords(
    args, kwdict, "|OkzOOdOOzz:createOrder", const_cast<char**>(kwlist),
    &pylocation, &id, &ref, &pyitem, &pysupplier, &qty, &pystart, &pyend,
    &status, &source
    );
  if (!ok)
    return NULL;
  Date start = pystart ? PythonData(pystart).getDate() : Date::infinitePast;
  Date end = pyend ? PythonData(pyend).getDate() : Date::infinitePast;

  // Validate all arguments
  if (!pylocation || !pyitem)
  {
    PyErr_SetString(PythonDataException, "item and location arguments are mandatory");
    return NULL;
  }
  PythonData location_tmp(pylocation);
  if (!location_tmp.check(Location::metadata))
  {
    PyErr_SetString(PythonDataException, "location argument must be of type location");
    return NULL;
  }
  PythonData item_tmp(pyitem);
  if (!item_tmp.check(Item::metadata))
  {
    PyErr_SetString(PythonDataException, "item argument must be of type item");
    return NULL;
  }
  PythonData supplier_tmp(pysupplier);
  if (pysupplier && !supplier_tmp.check(Supplier::metadata))
  {
    PyErr_SetString(PythonDataException, "supplier argument must be of type supplier");
    return NULL;
  }
  Item *item = static_cast<Item*>(item_tmp.getObject());
  Location *location = static_cast<Location*>(location_tmp.getObject());
  Supplier *supplier = pysupplier ? static_cast<Supplier*>(supplier_tmp.getObject()) : NULL;

  // Find or create the destination buffer.
  Buffer* destbuffer = NULL;
  Item::bufferIterator buf_iter(item);
  while (Buffer* tmpbuf = buf_iter.next())
  {
    if (tmpbuf->getLocation() == location)
    {
      if (destbuffer)
      {
        // Fixed message: previously read "...'' and location'..." with a
        // doubled quote and a missing space.
        stringstream o;
        o << "Multiple buffers found for item '" << item
          << "' and location '" << location << "'";
        throw DataException(o.str());
      }
      destbuffer = tmpbuf;
    }
  }
  if (!destbuffer)
  {
    // Create the destination buffer
    destbuffer = new BufferDefault();
    stringstream o;
    o << item << " @ " << location;
    destbuffer->setName(o.str());
    destbuffer->setItem(item);
    destbuffer->setLocation(location);
  }

  // Build the producing operation for this buffer.
  destbuffer->getProducingOperation();

  // Look for a matching operation replenishing this buffer.
  Operation *oper = NULL;
  for (Buffer::flowlist::const_iterator flowiter = destbuffer->getFlows().begin();
      flowiter != destbuffer->getFlows().end() && !oper; ++flowiter)
  {
    if (flowiter->getOperation()->getType() != *OperationItemSupplier::metadata)
      continue;
    OperationItemSupplier* opitemsupplier = static_cast<OperationItemSupplier*>(flowiter->getOperation());
    if (supplier)
    {
      // A supplier was given: it must match (or be a member of) the
      // supplier of the candidate operation.
      if (supplier->isMemberOf(opitemsupplier->getItemSupplier()->getSupplier()))
        oper = opitemsupplier;
    }
    else
      oper = opitemsupplier;
  }

  // No matching operation is found.
  if (!oper)
  {
    // We'll create one now, but that requires that we have a supplier defined.
    if (!supplier)
      throw DataException("Supplier is needed on this purchase order");
    // Note: We know that we need to create a new one. An existing one would
    // have created an operation on the buffer already.
    ItemSupplier *itemsupplier = new ItemSupplier();
    itemsupplier->setSupplier(supplier);
    itemsupplier->setItem(item);
    itemsupplier->setLocation(location);
    oper = new OperationItemSupplier(itemsupplier, destbuffer);
    // Record a data problem: the purchase order uses a supplier without a
    // valid itemsupplier definition.
    new ProblemInvalidData(oper, "Purchase orders on unauthorized supplier", "operation",
        Date::infinitePast, Date::infiniteFuture, 1);
  }

  // Finally, create the operationplan
  OperationPlan *opplan = oper->createOperationPlan(qty, start, end);
  if (id)
    // We can use this fast method because we call activate later
    opplan->setRawIdentifier(id);
  if (status)
    opplan->setStatus(status);
  // Reset quantity after the status update to assure that
  // also non-valid quantities are getting accepted.
  opplan->setQuantity(qty);
  if (ref)
    opplan->setReference(ref);
  opplan->activate();

  // Return result
  Py_INCREF(opplan);
  return opplan;
}
// Recompute the level (distance from the demand side) and cluster
// (connected-component number) of every operation, buffer and resource in
// the model. The traversal is an explicit stack-based graph walk over
// flows, loads and sub/super operation links. Statement order inside the
// walk is significant: cluster/level fields are assigned at push time to
// keep entities from being pushed twice.
DECLARE_EXPORT void HasLevel::computeLevels()
{
  computationBusy = true;
  // Get exclusive access to this function in a multi-threaded environment.
  static Mutex levelcomputationbusy;
  ScopeMutexLock l(levelcomputationbusy);

  // Another thread may already have computed the levels while this thread was
  // waiting for the lock. In that case the while loop will be skipped.
  while (recomputeLevels)
  {
    // Reset the recomputation flag. Note that during the computation the flag
    // could be switched on again by some model change in a different thread.
    // In that case, the while loop will be rerun.
    recomputeLevels = false;

    // Force creation of all delivery operations
    for (Demand::iterator gdem = Demand::begin();
        gdem != Demand::end(); ++gdem)
      gdem->getDeliveryOperation();

    // Reset current levels on buffers, resources and operations.
    // Also force the creation of all producing operations on the buffers.
    size_t numbufs = Buffer::size();
    // Creating the producing operations of the buffers can cause new buffers
    // to be created. We repeat this loop until no new buffers are being added.
    // This isn't the most efficient loop, but it remains cheap and fast...
    while (true)
    {
      for (Buffer::iterator gbuf = Buffer::begin();
          gbuf != Buffer::end(); ++gbuf)
      {
        gbuf->cluster = 0;
        gbuf->lvl = -1;
        gbuf->getProducingOperation();
      }
      size_t numbufs_after = Buffer::size();
      if (numbufs == numbufs_after)
        break;
      else
        numbufs = numbufs_after;
    }
    for (Resource::iterator gres = Resource::begin();
        gres != Resource::end(); ++gres)
    {
      gres->cluster = 0;
      gres->lvl = -1;
    }
    for (Operation::iterator gop = Operation::begin();
        gop != Operation::end(); ++gop)
    {
      gop->cluster = 0;
      gop->lvl = -1;
    }

    // Loop through all operations
    stack< pair<Operation*,int> > stack;
    Operation* cur_oper;
    int cur_level;
    Buffer *cur_buf;
    const Flow* cur_Flow;
    bool search_level;
    int cur_cluster;
    numberOfLevels = 0;
    numberOfClusters = 0;
    map<Operation*,short> visited;
    for (Operation::iterator g = Operation::begin();
        g != Operation::end(); ++g)
    {
      // Select a new cluster number
      if (g->cluster)
        cur_cluster = g->cluster;
      else
      {
        // Detect hanging operations: no flows, no loads, not part of any
        // operation hierarchy.
        if (g->getFlows().empty() && g->getLoads().empty()
            && g->getSuperOperations().empty()
            && g->getSubOperations().empty()
           )
        {
          // Cluster 0 keeps all dangling operations
          g->lvl = 0;
          continue;
        }
        cur_cluster = ++numberOfClusters;
        if (numberOfClusters >= UINT_MAX)
          throw LogicException("Too many clusters");
      }

#ifdef CLUSTERDEBUG
      logger << "Investigating operation '" << &*g
          << "' - current cluster " << g->cluster << endl;
#endif

      // Do we need to activate the level search?
      // Criteria are:
      //   - Not used in a super operation
      //   - Have a producing flow on the operation itself
      //     or on any of its sub operations
      search_level = false;
      if (g->getSuperOperations().empty())
      {
        search_level = true;
        // Does the operation itself have producing flows?
        for (Operation::flowlist::const_iterator fl = g->getFlows().begin();
            fl != g->getFlows().end() && search_level; ++fl)
          if (fl->isProducer()) search_level = false;
        if (search_level)
        {
          // Do suboperations have a producing flow?
          for (Operation::Operationlist::const_reverse_iterator
              i = g->getSubOperations().rbegin();
              i != g->getSubOperations().rend() && search_level;
              ++i)
            for (Operation::flowlist::const_iterator
                fl = (*i)->getOperation()->getFlows().begin();
                fl != (*i)->getOperation()->getFlows().end() && search_level;
                ++fl)
              if (fl->isProducer()) search_level = false;
        }
      }

      // If both the level and the cluster are de-activated, then we can move on
      if (!search_level && g->cluster)
        continue;

      // Start recursing.
      // Note that as soon as we push an operation on the stack we set its
      // cluster and/or level. This avoids that operations are needlessly
      // pushed a second time on the stack.
      stack.push(make_pair(&*g, search_level ? 0 : -1));
      visited.clear();
      g->cluster = cur_cluster;
      if (search_level) g->lvl = 0;
      while (!stack.empty())
      {
        // Take the top of the stack
        cur_oper = stack.top().first;
        cur_level = stack.top().second;
        stack.pop();

        // Keep track of the maximum number of levels
        if (cur_level > numberOfLevels)
          numberOfLevels = cur_level;

#ifdef CLUSTERDEBUG
        logger << " Recursing in Operation '" << *(cur_oper)
            << "' - current level " << cur_level << endl;
#endif

        // Detect loops in the supply chain
        map<Operation*,short>::iterator detectloop = visited.find(cur_oper);
        if (detectloop == visited.end())
          // Keep track of operations already visited
          visited.insert(make_pair(cur_oper,0));
        else if (++(detectloop->second) > 1)
          // Already visited this operation enough times - don't repeat
          continue;

        // Push sub operations on the stack
        for (Operation::Operationlist::const_reverse_iterator
            i = cur_oper->getSubOperations().rbegin();
            i != cur_oper->getSubOperations().rend();
            ++i)
        {
          if ((*i)->getOperation()->lvl < cur_level)
          {
            // Search level and cluster
            stack.push(make_pair((*i)->getOperation(),cur_level));
            (*i)->getOperation()->lvl = cur_level;
            (*i)->getOperation()->cluster = cur_cluster;
          }
          else if (!(*i)->getOperation()->cluster)
          {
            // Search for clusters information only
            stack.push(make_pair((*i)->getOperation(),-1));
            (*i)->getOperation()->cluster = cur_cluster;
          }
          // else: no search required
        }

        // Push super operations on the stack
        for (list<Operation*>::const_reverse_iterator
            j = cur_oper->getSuperOperations().rbegin();
            j != cur_oper->getSuperOperations().rend();
            ++j)
        {
          if ((*j)->lvl < cur_level)
          {
            // Search level and cluster
            stack.push(make_pair(*j,cur_level));
            (*j)->lvl = cur_level;
            (*j)->cluster = cur_cluster;
          }
          else if (!(*j)->cluster)
          {
            // Search for clusters information only
            stack.push(make_pair(*j,-1));
            (*j)->cluster = cur_cluster;
          }
          // else: no search required
        }

        // Update level of resources linked to current operation
        for (Operation::loadlist::const_iterator gres = cur_oper->getLoads().begin();
            gres != cur_oper->getLoads().end(); ++gres)
        {
          Resource *resptr = gres->getResource();
          // Update the level of the resource
          if (resptr->lvl < cur_level)
            resptr->lvl = cur_level;
          // Update the cluster of the resource and operations using it
          if (!resptr->cluster)
          {
            resptr->cluster = cur_cluster;
            // Find more operations connected to this cluster by the resource
            for (Resource::loadlist::const_iterator resops = resptr->getLoads().begin();
                resops != resptr->getLoads().end(); ++resops)
              if (!resops->getOperation()->cluster)
              {
                stack.push(make_pair(resops->getOperation(),-1));
                resops->getOperation()->cluster = cur_cluster;
              }
          }
        }

        // Now loop through all flows of the operation
        for (Operation::flowlist::const_iterator gflow = cur_oper->getFlows().begin();
            gflow != cur_oper->getFlows().end(); ++gflow)
        {
          cur_Flow = &*gflow;
          cur_buf = cur_Flow->getBuffer();

          // Check whether the level search needs to continue
          search_level = cur_level!=-1 && cur_buf->lvl<cur_level+1;

          // Check if the buffer needs processing
          if (search_level || !cur_buf->cluster)
          {
            // Update the cluster of the current buffer
            cur_buf->cluster = cur_cluster;

            // Loop through all flows of the buffer
            for (Buffer::flowlist::const_iterator
                buffl = cur_buf->getFlows().begin();
                buffl != cur_buf->getFlows().end();
                ++buffl)
            {
              // Check level recursion: producers of a buffer consumed by
              // the current operation sit one level deeper.
              if (cur_Flow->isConsumer() && search_level)
              {
                if (buffl->getOperation()->lvl < cur_level+1
                    && &*buffl != cur_Flow && buffl->isProducer())
                {
                  stack.push(make_pair(buffl->getOperation(),cur_level+1));
                  buffl->getOperation()->lvl = cur_level+1;
                  buffl->getOperation()->cluster = cur_cluster;
                }
                else if (!buffl->getOperation()->cluster)
                {
                  stack.push(make_pair(buffl->getOperation(),-1));
                  buffl->getOperation()->cluster = cur_cluster;
                }
                if (cur_level+1 > numberOfLevels)
                  numberOfLevels = cur_level+1;
                cur_buf->lvl = cur_level+1;
              }
              // Check cluster recursion
              else if (!buffl->getOperation()->cluster)
              {
                stack.push(make_pair(buffl->getOperation(),-1));
                buffl->getOperation()->cluster = cur_cluster;
              }
            }
          } // End of needs-processing if statement
        } // End of flow loop
      } // End while stack not empty
    } // End of Operation loop

    // The above loop will visit ALL operations and recurse through the
    // buffers and resources connected to them.
    // Missing from the loop are buffers and resources that have no flows or
    // loads at all. We catch those poor lonely fellows now...
    for (Buffer::iterator gbuf2 = Buffer::begin();
        gbuf2 != Buffer::end(); ++gbuf2)
      if (gbuf2->getFlows().empty()) gbuf2->cluster = 0;
    for (Resource::iterator gres2 = Resource::begin();
        gres2 != Resource::end(); ++gres2)
      if (gres2->getLoads().empty()) gres2->cluster = 0;

  } // End of while recomputeLevels. The loop will be repeated as long as model
    // changes are done during the recomputation.

  // Unlock the exclusive access to this function
  computationBusy = false;
}
extern "C" PyObject* OperationItemDistribution::createOrder(
  PyObject *self, PyObject *args, PyObject *kwdict
  )
{
  // Python entry point to create a distribution order operationplan.
  // Keyword arguments: destination, id, reference, item, origin, quantity,
  // start, end, consume_material, status, source. The item and destination
  // arguments are mandatory; all others are optional.
  // Returns a new reference to the created operationplan, or NULL with a
  // Python exception set.

  // Parse the Python arguments
  PyObject* pydest = NULL;
  unsigned long id = 0;
  const char* ref = NULL;
  PyObject* pyitem = NULL;
  PyObject* pyorigin = NULL;
  double qty = 0;
  PyObject* pystart = NULL;
  PyObject* pyend = NULL;
  int consume = 1;
  const char* status = NULL;
  const char* source = NULL;   // Parsed but currently unused
  static const char *kwlist[] = {
    "destination", "id", "reference", "item", "origin", "quantity",
    "start", "end", "consume_material", "status", "source", NULL
    };
  int ok = PyArg_ParseTupleAndKeywords(
    args, kwdict, "|OkzOOdOOpzz:createOrder", const_cast<char**>(kwlist),
    &pydest, &id, &ref, &pyitem, &pyorigin, &qty, &pystart, &pyend,
    &consume, &status, &source
    );
  if (!ok)
    return NULL;
  Date start = pystart ? PythonData(pystart).getDate() : Date::infinitePast;
  Date end = pyend ? PythonData(pyend).getDate() : Date::infinitePast;

  // Validate all arguments
  if (!pydest || !pyitem)
  {
    PyErr_SetString(PythonDataException, "item and destination arguments are mandatory");
    return NULL;
  }
  PythonData dest_tmp(pydest);
  if (!dest_tmp.check(Location::metadata))
  {
    PyErr_SetString(PythonDataException, "destination argument must be of type location");
    return NULL;
  }
  PythonData item_tmp(pyitem);
  if (!item_tmp.check(Item::metadata))
  {
    PyErr_SetString(PythonDataException, "item argument must be of type item");
    return NULL;
  }
  PythonData origin_tmp(pyorigin);
  if (pyorigin && !origin_tmp.check(Location::metadata))
  {
    PyErr_SetString(PythonDataException, "origin argument must be of type location");
    return NULL;
  }
  Item *item = static_cast<Item*>(item_tmp.getObject());
  Location *dest = static_cast<Location*>(dest_tmp.getObject());
  Location *origin = pyorigin ? static_cast<Location*>(origin_tmp.getObject()) : NULL;

  // Find or create the destination buffer.
  Buffer* destbuffer = NULL;
  for (Buffer::iterator bufiter = Buffer::begin(); bufiter != Buffer::end(); ++bufiter)
  {
    if (bufiter->getLocation() == dest && bufiter->getItem() == item)
    {
      if (destbuffer)
      {
        // Fixed message: previously read "...'' and location'..." with a
        // doubled quote and a missing space.
        stringstream o;
        o << "Multiple buffers found for item '" << item
          << "' and location '" << dest << "'";
        throw DataException(o.str());
      }
      destbuffer = &*bufiter;
    }
  }
  if (!destbuffer)
  {
    // Create the destination buffer
    destbuffer = new BufferDefault();
    stringstream o;
    o << item << " @ " << dest;
    destbuffer->setName(o.str());
    destbuffer->setItem(item);
    destbuffer->setLocation(dest);
  }

  // Build the producing operation for this buffer.
  destbuffer->getProducingOperation();

  // Look for a matching operation replenishing this buffer.
  Operation *oper = NULL;
  for (Buffer::flowlist::const_iterator flowiter = destbuffer->getFlows().begin();
      flowiter != destbuffer->getFlows().end() && !oper; ++flowiter)
  {
    if (flowiter->getOperation()->getType() != *OperationItemDistribution::metadata
        || flowiter->getQuantity() <= 0)
      continue;
    OperationItemDistribution* opitemdist = static_cast<OperationItemDistribution*>(flowiter->getOperation());
    if (origin)
    {
      // Origin must match as well: look for a consuming flow from a buffer
      // at the requested origin location.
      for (Operation::flowlist::const_iterator fl = opitemdist->getFlows().begin();
          fl != opitemdist->getFlows().end(); ++fl)
      {
        if (fl->getQuantity() < 0 && fl->getBuffer()->getLocation()->isMemberOf(origin))
          oper = opitemdist;
      }
    }
    else
      oper = opitemdist;
  }

  // No matching operation is found.
  if (!oper)
  {
    // We'll create one now, but that requires that we have an origin defined.
    if (!origin)
      throw DataException("Origin location is needed on this distribution order");

    // Find or create the origin buffer.
    Buffer* originbuffer = NULL;
    for (Buffer::iterator bufiter = Buffer::begin(); bufiter != Buffer::end(); ++bufiter)
    {
      if (bufiter->getLocation() == origin && bufiter->getItem() == item)
      {
        if (originbuffer)
        {
          // Fixed message: report the origin location (the one being
          // searched), not the destination, and fix the quoting.
          stringstream o;
          o << "Multiple buffers found for item '" << item
            << "' and location '" << origin << "'";
          throw DataException(o.str());
        }
        originbuffer = &*bufiter;
      }
    }
    if (!originbuffer)
    {
      // Create the origin buffer
      originbuffer = new BufferDefault();
      stringstream o;
      o << item << " @ " << origin;
      originbuffer->setName(o.str());
      originbuffer->setItem(item);
      originbuffer->setLocation(origin);
    }

    // Note: We know that we need to create a new one. An existing one would
    // have created an operation on the buffer already.
    ItemDistribution *itemdist = new ItemDistribution();
    itemdist->setOrigin(origin);
    itemdist->setItem(item);
    itemdist->setDestination(dest);
    oper = new OperationItemDistribution(itemdist, originbuffer, destbuffer);
    // Record a data problem: the distribution order uses a lane without a
    // valid itemdistribution definition.
    new ProblemInvalidData(oper, "Distribution orders on unauthorized lanes", "operation",
        Date::infinitePast, Date::infiniteFuture, 1);
  }

  // Finally, create the operationplan
  OperationPlan *opplan = oper->createOperationPlan(qty, start, end, NULL, NULL, 0, false);
  if (id)
    opplan->setIdentifier(id);
  if (status)
    opplan->setStatus(status);
  if (ref)
    opplan->setReference(ref);
  if (!consume)
    opplan->setConsumeMaterial(false);
  opplan->activate();

  // Return result
  Py_INCREF(opplan);
  return opplan;
}