Example #1
PyObject* CalendarEventIterator::iternext()
{
  if ((forward && eventiter.getDate() == Date::infiniteFuture)
      || (!forward && eventiter.getDate() == Date::infinitePast))
    return NULL;
  PythonData x;
  if (dynamic_cast<CalendarDefault*>(cal))
  {
    if (eventiter.getBucket())
      x = PythonData(dynamic_cast<const CalendarBucket*>(eventiter.getBucket())->getValue());
    else
      x = PythonData(dynamic_cast<CalendarDefault*>(cal)->getDefault());
  }
  else
    // Unknown calendar type that we can't iterate over
    return NULL;
  PyObject* result = Py_BuildValue("(N,N)",
      static_cast<PyObject*>(PythonData(eventiter.getDate())),
      static_cast<PyObject*>(x)
      );
  if (forward)
    ++eventiter;
  else
    --eventiter;
  return result;
}
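The tuple above is built with the "N" format code, which steals a reference to each argument; that is why the temporary PythonData wrappers are not released explicitly. Below is a minimal standalone sketch of that reference-counting rule, using only the plain CPython API (the date and value literals are made up for illustration and are not frePPLe code):

#include <Python.h>

int main()
{
  Py_Initialize();
  PyObject* date = PyUnicode_FromString("2024-01-01");   // new reference
  PyObject* value = PyFloat_FromDouble(42.0);            // new reference
  // "N" consumes both references; with "O" the tuple would take its own
  // reference and we would still have to Py_DECREF date and value ourselves.
  PyObject* result = Py_BuildValue("(N,N)", date, value);
  PyObject_Print(result, stdout, 0);
  Py_DECREF(result);   // releases the tuple and, through it, date and value
  Py_Finalize();
  return 0;
}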
Example #2
DECLARE_EXPORT PyObject* Calendar::getEvents(
  PyObject* self, PyObject* args
)
{
  try
  {
    // Pick up the calendar
    Calendar *cal = NULL;
    PythonData c(self);
    if (c.check(CalendarDefault::metadata))
      cal = static_cast<CalendarDefault*>(self);
    else
      throw LogicException("Invalid calendar type");

    // Parse the arguments
    PyObject* pystart = NULL;
    PyObject* pydirection = NULL;
    if (!PyArg_ParseTuple(args, "|OO:getEvents", &pystart, &pydirection))
      return NULL;
    Date startdate = pystart ? PythonData(pystart).getDate() : Date::infinitePast;
    bool forward = pydirection ? PythonData(pydirection).getBool() : true;

    // Return the iterator
    return new CalendarEventIterator(cal, startdate, forward);
  }
  catch(...)
  {
    PythonType::evalException();
    return NULL;
  }
}
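The "|OO:..." format makes both arguments optional: the pointers keep their NULL initialisation when the caller omits them, which is what drives the infinitePast and forward defaults above. A compilable sketch of the same pattern in a hypothetical extension module follows (the module and function names are invented for illustration; this is not the frePPLe binding):

#include <Python.h>

static PyObject* get_events(PyObject* self, PyObject* args)
{
  PyObject* pystart = NULL;
  PyObject* pydirection = NULL;
  if (!PyArg_ParseTuple(args, "|OO:getEvents", &pystart, &pydirection))
    return NULL;                                  // TypeError already set by the parser
  int forward = pydirection ? PyObject_IsTrue(pydirection) : 1;
  if (forward < 0)
    return NULL;                                  // propagate a conversion error
  return PyBool_FromLong(forward);                // placeholder reply
}

static PyMethodDef demo_methods[] = {
  {"getEvents", get_events, METH_VARARGS, "Demonstrates optional arguments"},
  {NULL, NULL, 0, NULL}
};

static struct PyModuleDef demo_module = {
  PyModuleDef_HEAD_INIT, "demo", NULL, -1, demo_methods
};

PyMODINIT_FUNC PyInit_demo(void)
{
  return PyModule_Create(&demo_module);
}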
Example #3
void SolverMRP::solve(const BufferInfinite* b, void* v)
{
  SolverMRPdata* data = static_cast<SolverMRPdata*>(v);

  // Call the user exit
  if (userexit_buffer) userexit_buffer.call(b, PythonData(data->constrainedPlanning));

  // Message
  if (data->getSolver()->getLogLevel()>1)
    logger << indent(b->getLevel()) << "  Infinite buffer '" << b << "' is asked: "
        << data->state->q_qty << "  " << data->state->q_date << endl;

  // Reply whatever is requested, regardless of date, quantity or supply.
  // The demand is not propagated upstream either.
  data->state->a_qty = data->state->q_qty;
  data->state->a_date = data->state->q_date;
  if (b->getItem())
    data->state->a_cost += data->state->q_qty * b->getItem()->getPrice();

  // Message
  if (data->getSolver()->getLogLevel()>1)
    logger << indent(b->getLevel()) << "  Infinite buffer '" << b << "' answers: "
        << data->state->a_qty << "  " << data->state->a_date << "  "
        << data->state->a_cost << "  " << data->state->a_penalty << endl;
}
Example #4
Resource::PlanIterator::PlanIterator(Resource* r, PyObject* o) :
  res(r), bucketiterator(o), ldplaniter(r ? r->getLoadPlans().begin() : NULL),
  cur_setup(0.0), cur_load(0.0), cur_size(0.0), start_date(NULL), end_date(NULL)
{
  if (!r)
  {
    bucketiterator = NULL;
    throw LogicException("Creating resource plan iterator for NULL resource");
  }

  // Count differently for bucketized and continuous resources
  bucketized = (r->getType() == *ResourceBuckets::metadata);

  if (bucketized)
  {
    while (ldplaniter != res->getLoadPlans().end() && ldplaniter->getType() != 2)
      ++ldplaniter;
  }
  else
  {
    // Start date of the first bucket
    end_date = PyIter_Next(bucketiterator);
    if (!end_date) throw LogicException("Expecting at least two dates as argument");
    cur_date = PythonData(end_date).getDate();
    prev_date = cur_date;

    // A flag to remember whether this resource has an unavailability calendar.
    hasUnavailability = r->getLocation() && r->getLocation()->getAvailable();
    if (hasUnavailability)
    {
      unavailableIterator = Calendar::EventIterator(res->getLocation()->getAvailable(), cur_date);
      prev_value = unavailableIterator.getBucket() ?
        unavailableIterator.getBucket()->getBool() :
        res->getLocation()->getAvailable()->getDefault()!=0;
    }

    // Advance loadplan iterator just beyond the starting date
    while (ldplaniter != res->getLoadPlans().end() && ldplaniter->getDate() <= cur_date)
    {
      unsigned short tp = ldplaniter->getType();
      if (tp == 4)
        // New max size
        cur_size = ldplaniter->getMax();
      else if (tp == 1)
      {
        const LoadPlan* ldplan = dynamic_cast<const LoadPlan*>(&*ldplaniter);
        if (ldplan->getOperationPlan()->getOperation() == OperationSetup::setupoperation)
          // Setup starting or ending
          cur_setup = ldplan->getQuantity() < 0 ? 0.0 : cur_size;
        else
          // Normal load
          cur_load = ldplan->getOnhand();
      }
      ++ldplaniter;
    }
  }
}
Example #5
DECLARE_EXPORT PyObject* eraseModel(PyObject* self, PyObject* args)
{
  // Pick up arguments
  PyObject *obj = NULL;
  int ok = PyArg_ParseTuple(args, "|O:erase", &obj);
  if (!ok) return NULL;

  // Validate the argument
  bool deleteStaticModel = false;
  if (obj) deleteStaticModel = PythonData(obj).getBool();

  // Execute and catch exceptions
  Py_BEGIN_ALLOW_THREADS   // Free Python interpreter for other threads
  try
  {
    if (deleteStaticModel)
    {
      // Delete all entities.
      // The order is chosen to minimize the work of the individual destructors.
      // E.g. the destructor of the item class recurses over all demands and
      // all buffers. It is much faster if there are none already.
      Operation::clear();
      Demand::clear();
      Buffer::clear();
      Resource::clear();
      SetupMatrix::clear();
      Location::clear();
      Customer::clear();
      Calendar::clear();
      Supplier::clear();
      Item::clear();
      Plan::instance().setName("");
      Plan::instance().setDescription("");
      // The setup operation is a static singleton and should always be around
      OperationSetup::setupoperation = new OperationSetup();
      OperationSetup::setupoperation->setName("setup operation");
    }
    else
    {
      // Delete the operationplans only
      for (Operation::iterator gop = Operation::begin();
          gop != Operation::end(); ++gop)
        gop->deleteOperationPlans();
    }
  }
  catch (...)
  {
    Py_BLOCK_THREADS;
    PythonType::evalException();
    return NULL;
  }
  Py_END_ALLOW_THREADS   // Reclaim Python interpreter
  return Py_BuildValue("");
}
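The Py_BEGIN_ALLOW_THREADS / Py_END_ALLOW_THREADS pair releases the GIL while the C++ work runs, and Py_BLOCK_THREADS reacquires it before the exception handler touches any Python API. A standalone sketch of that pattern (the helper, module name and error text are hypothetical and not part of frePPLe):

#include <Python.h>
#include <stdexcept>

// Hypothetical stand-in for the long-running, GIL-free C++ work above.
static void long_running_cpp_work(bool fail)
{
  if (fail)
    throw std::runtime_error("boom");
}

static PyObject* erase_demo(PyObject* self, PyObject* args)
{
  int fail = 0;
  if (!PyArg_ParseTuple(args, "|p:erase_demo", &fail))
    return NULL;
  Py_BEGIN_ALLOW_THREADS       // release the GIL: other Python threads may run
  try
  {
    long_running_cpp_work(fail != 0);
  }
  catch (...)
  {
    Py_BLOCK_THREADS;          // reacquire the GIL before calling any Python API
    PyErr_SetString(PyExc_RuntimeError, "work failed");
    return NULL;
  }
  Py_END_ALLOW_THREADS         // reacquire the GIL on the normal path
  Py_RETURN_NONE;
}

static PyMethodDef erase_methods[] = {{"erase_demo", erase_demo, METH_VARARGS, NULL}, {NULL, NULL, 0, NULL}};
static struct PyModuleDef erase_module = {PyModuleDef_HEAD_INIT, "erasedemo", NULL, -1, erase_methods};
PyMODINIT_FUNC PyInit_erasedemo(void) { return PyModule_Create(&erase_module); }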
Example #6
PyObject* SetupMatrix::addPythonRule(PyObject* self, PyObject* args, PyObject* kwdict)
{
  try
  {
    // Pick up the setup matrix
    SetupMatrix *matrix = static_cast<SetupMatrix*>(self);
    if (!matrix) throw LogicException("Can't add a rule to a nullptr setupmatrix");

    // Parse the arguments
    int prio = 0;
    PyObject *pyfrom = nullptr;
    PyObject *pyto = nullptr;
    long duration = 0;
    double cost = 0;
    static const char *kwlist[] = {"priority", "fromsetup", "tosetup", "duration", "cost", nullptr};
    if (!PyArg_ParseTupleAndKeywords(args, kwdict,
        "i|ssld:addRule",
        const_cast<char**>(kwlist), &prio, &pyfrom, &pyto, &duration, &cost))
      return nullptr;

    // Add the new rule
    SetupMatrixRule *r = new SetupMatrixRule();
    r->setPriority(prio);
    r->setSetupMatrix(matrix);
    if (pyfrom) r->setFromSetup(PythonData(pyfrom).getString());
    if (pyto) r->setToSetup(PythonData(pyto).getString());
    r->setDuration(duration);
    r->setCost(cost);
    return PythonData(r);
  }
  catch(...)
  {
    PythonType::evalException();
    return nullptr;
  }
}
Example #7
PyObject* ItemSupplier::create(PyTypeObject* pytype, PyObject* args, PyObject* kwds)
{
  try
  {
    // Pick up the supplier
    PyObject* sup = PyDict_GetItemString(kwds,"supplier");
    if (!sup)
      throw DataException("missing supplier on ItemSupplier");
    if (!PyObject_TypeCheck(sup, Supplier::metadata->pythonClass))
      throw DataException("ItemSupplier supplier must be of type supplier");

    // Pick up the item
    PyObject* it = PyDict_GetItemString(kwds,"item");
    if (!it)
      throw DataException("missing item on ItemSupplier");
    if (!PyObject_TypeCheck(it, Item::metadata->pythonClass))
      throw DataException("ItemSupplier item must be of type item");

    // Pick up the priority
    PyObject* q1 = PyDict_GetItemString(kwds,"priority");
    int q2 = q1 ? PythonData(q1).getInt() : 1;

    // Pick up the effective dates
    DateRange eff;
    PyObject* eff_start = PyDict_GetItemString(kwds,"effective_start");
    if (eff_start)
    {
      PythonData d(eff_start);
      eff.setStart(d.getDate());
    }
    PyObject* eff_end = PyDict_GetItemString(kwds,"effective_end");
    if (eff_end)
    {
      PythonData d(eff_end);
      eff.setEnd(d.getDate());
    }

    // Create the ItemSupplier
    ItemSupplier *l = new ItemSupplier(
      static_cast<Supplier*>(sup),
      static_cast<Item*>(it),
      q2, eff
    );

    // Iterate over extra keywords, and set attributes.   @todo move this responsibility to the readers...
    if (l)
    {
      PyObject *key, *value;
      Py_ssize_t pos = 0;
      while (PyDict_Next(kwds, &pos, &key, &value))
      {
        PythonData field(value);
        PyObject* key_utf8 = PyUnicode_AsUTF8String(key);
        DataKeyword attr(PyBytes_AsString(key_utf8));
        Py_DECREF(key_utf8);
        if (!attr.isA(Tags::effective_end) && !attr.isA(Tags::effective_start)
          && !attr.isA(Tags::supplier) && !attr.isA(Tags::item)
          && !attr.isA(Tags::type) && !attr.isA(Tags::priority) && !attr.isA(Tags::action))
        {
          const MetaFieldBase* fmeta = l->getType().findField(attr.getHash());
          if (!fmeta && l->getType().category)
            fmeta = l->getType().category->findField(attr.getHash());
          if (fmeta)
            // Update the attribute
            fmeta->setField(l, field);
          else
            l->setProperty(attr.getName(), value);
        }
      }
    }

    // Return the object
    Py_INCREF(l);
    return static_cast<PyObject*>(l);
  }
  catch (...)
  {
    PythonType::evalException();
    return nullptr;
  }
}
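Two reference-counting details in the keyword loop above: PyDict_GetItemString returns a borrowed reference (so the values fetched from kwds are never released here), while PyUnicode_AsUTF8String returns a new reference (hence the Py_DECREF(key_utf8)). A small standalone illustration of the difference, assuming a made-up dictionary content:

#include <Python.h>
#include <cstdio>

int main()
{
  Py_Initialize();
  PyObject* kwds = Py_BuildValue("{s:s,s:i}", "item", "widget", "priority", 1);

  // Borrowed reference: the dictionary stays the owner, so no Py_DECREF here.
  PyObject* item = PyDict_GetItemString(kwds, "item");

  // New reference: we own the bytes object and must release it ourselves.
  PyObject* item_utf8 = PyUnicode_AsUTF8String(item);
  std::printf("item = %s\n", PyBytes_AsString(item_utf8));
  Py_DECREF(item_utf8);

  Py_DECREF(kwds);
  Py_Finalize();
  return 0;
}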
Example #8
PyObject* Load::create(PyTypeObject* pytype, PyObject* args, PyObject* kwds)
{
  try
  {
    // Pick up the operation
    PyObject* oper = PyDict_GetItemString(kwds,"operation");
    if (!oper)
      throw DataException("missing operation on Load");
    if (!PyObject_TypeCheck(oper, Operation::metadata->pythonClass))
      throw DataException("load operation must be of type operation");

    // Pick up the resource
    PyObject* res = PyDict_GetItemString(kwds,"resource");
    if (!res)
      throw DataException("missing resource on Load");
    if (!PyObject_TypeCheck(res, Resource::metadata->pythonClass))
      throw DataException("load resource must be of type resource");

    // Pick up the quantity
    PyObject* q1 = PyDict_GetItemString(kwds,"quantity");
    double q2 = q1 ? PythonData(q1).getDouble() : 1.0;

    // Pick up the effective dates
    DateRange eff;
    PyObject* eff_start = PyDict_GetItemString(kwds,"effective_start");
    if (eff_start)
    {
      PythonData d(eff_start);
      eff.setStart(d.getDate());
    }
    PyObject* eff_end = PyDict_GetItemString(kwds,"effective_end");
    if (eff_end)
    {
      PythonData d(eff_end);
      eff.setEnd(d.getDate());
    }

    // Create the load
    Load *l = new LoadDefault(
      static_cast<Operation*>(oper),
      static_cast<Resource*>(res),
      q2, eff
    );

    // Iterate over extra keywords, and set attributes.   @todo move this responsibility to the readers...
    if (l)
    {
      PyObject *key, *value;
      Py_ssize_t pos = 0;
      while (PyDict_Next(kwds, &pos, &key, &value))
      {
        PythonData field(value);
        PyObject* key_utf8 = PyUnicode_AsUTF8String(key);
        DataKeyword attr(PyBytes_AsString(key_utf8));
        Py_DECREF(key_utf8);
        if (!attr.isA(Tags::effective_end) && !attr.isA(Tags::effective_start)
          && !attr.isA(Tags::operation) && !attr.isA(Tags::resource)
          && !attr.isA(Tags::quantity) && !attr.isA(Tags::type)
          && !attr.isA(Tags::action))
        {
          const MetaFieldBase* fmeta = l->getType().findField(attr.getHash());
          if (!fmeta && l->getType().category)
            fmeta = l->getType().category->findField(attr.getHash());
          if (fmeta)
            // Update the attribute
            fmeta->setField(l, field);
          else
            l->setProperty(attr.getName(), value);
        }
      }
    }

    // Return the object
    Py_INCREF(l);
    return static_cast<PyObject*>(l);
  }
  catch (...)
  {
    PythonType::evalException();
    return NULL;
  }
}
Example #9
PyObject* Resource::PlanIterator::iternext()
{
  // Reset counters
  bucket_available = 0.0;
  bucket_unavailable = 0.0;
  bucket_load = 0.0;
  bucket_setup = 0.0;

  if (bucketized)
  {
    if (ldplaniter == res->getLoadPlans().end())
      // No more resource buckets
      return NULL;
    else
    {
      // At this point ldplaniter points to a bucket start event.
      if (start_date) Py_DECREF(start_date);
      if (end_date)
        start_date = end_date;
      else
        start_date = PythonData(ldplaniter->getDate());
      bucket_available = ldplaniter->getOnhand();
    }
    // Advance the loadplan iterator to the start of the next bucket
    ++ldplaniter;
    while (ldplaniter != res->getLoadPlans().end() && ldplaniter->getType() != 2)
    {
      if (ldplaniter->getType() == 1)
        bucket_load -= ldplaniter->getQuantity();
      ++ldplaniter;
    }
    if (ldplaniter == res->getLoadPlans().end())
      end_date = PythonData(Date::infiniteFuture);
    else
      end_date = PythonData(ldplaniter->getDate());
  }
  else
  {
    // Get the start and end date of the current bucket
    if (start_date) Py_DECREF(start_date);
    start_date = end_date;
    end_date = PyIter_Next(bucketiterator);
    if (!end_date) return NULL;
    cur_date = PythonData(end_date).getDate();

    // Measure from beginning of the bucket till the first event in this bucket
    if (ldplaniter != res->getLoadPlans().end() && ldplaniter->getDate() < cur_date)
      update(ldplaniter->getDate());

    // Advance the loadplan iterator to the next event date
    while (ldplaniter != res->getLoadPlans().end() && ldplaniter->getDate() <= cur_date)
    {
      // Measure from the previous event till the current one
      update(ldplaniter->getDate());

      // Process the event
      unsigned short tp = ldplaniter->getType();
      if (tp == 4)
        // New max size
        cur_size = ldplaniter->getMax();
      else if (tp == 1)
      {
        const LoadPlan* ldplan = dynamic_cast<const LoadPlan*>(&*ldplaniter);
        assert(ldplan);
        if (ldplan->getOperationPlan()->getOperation() == OperationSetup::setupoperation)
          // Setup starting or ending
          cur_setup = ldplan->getQuantity() < 0 ? 0.0 : cur_size;
        else
          // Normal load
          cur_load = ldplan->getOnhand();
      }

      // Move to the next event
      ++ldplaniter;
    }

    // Measure from the previous event till the end of the bucket
    update(cur_date);

    // Convert from seconds to hours
    bucket_available /= 3600;
    bucket_load /= 3600;
    bucket_unavailable /= 3600;
    bucket_setup /= 3600;
  }

  // Return the result
  return Py_BuildValue("{s:O,s:O,s:d,s:d,s:d,s:d,s:d}",
    "start", start_date,
    "end", end_date,
    "available", bucket_available,
    "load", bucket_load,
    "unavailable", bucket_unavailable,
    "setup", bucket_setup,
    "free", bucket_available - bucket_load - bucket_setup);
}
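In contrast to Example #1, the result dictionary here is built with the "O" format code: the iterator caches start_date and end_date across calls, so the dictionary must take its own reference and leave the cached ones untouched ("N" would steal them). A minimal standalone sketch of that behaviour, with illustrative literals only:

#include <Python.h>
#include <cstdio>

int main()
{
  Py_Initialize();
  PyObject* start = PyUnicode_FromString("2024-01-01");            // cached reference
  PyObject* result = Py_BuildValue("{s:O,s:d}", "start", start, "load", 1.5);
  // The dict holds its own reference, so 'start' can safely be reused later.
  std::printf("refcount of start = %zd\n", Py_REFCNT(start));      // prints 2
  Py_DECREF(result);                                               // dict drops its reference
  Py_DECREF(start);                                                // release the cached one
  Py_Finalize();
  return 0;
}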
Example #10
extern "C" PyObject* OperationItemSupplier::createOrder(
  PyObject *self, PyObject *args, PyObject *kwdict
  )
{
  // Parse the Python arguments
  PyObject* pylocation = NULL;
  unsigned long id = 0;
  const char* ref = NULL;
  PyObject* pyitem = NULL;
  PyObject* pysupplier = NULL;
  double qty = 0;
  PyObject* pystart = NULL;
  PyObject* pyend = NULL;
  const char* status = NULL;
  const char* source = NULL;
  static const char *kwlist[] = {
    "location", "id", "reference", "item", "supplier", "quantity", "start",
    "end", "status", "source", NULL
    };
  int ok = PyArg_ParseTupleAndKeywords(
    args, kwdict, "|OkzOOdOOzz:createOrder", const_cast<char**>(kwlist),
    &pylocation, &id, &ref, &pyitem, &pysupplier, &qty, &pystart,
    &pyend, &status, &source
    );
  if (!ok)
    return NULL;
  Date start = pystart ? PythonData(pystart).getDate() : Date::infinitePast;
  Date end = pyend ? PythonData(pyend).getDate() : Date::infinitePast;

  // Validate all arguments
  if (!pylocation || !pyitem)
  {
    PyErr_SetString(PythonDataException, "item and location arguments are mandatory");
    return NULL;
  }
  PythonData location_tmp(pylocation);
  if (!location_tmp.check(Location::metadata))
  {
    PyErr_SetString(PythonDataException, "location argument must be of type location");
    return NULL;
  }
  PythonData item_tmp(pyitem);
  if (!item_tmp.check(Item::metadata))
  {
    PyErr_SetString(PythonDataException, "item argument must be of type item");
    return NULL;
  }
  PythonData supplier_tmp(pysupplier);
  if (pysupplier && !supplier_tmp.check(Supplier::metadata))
  {
    PyErr_SetString(PythonDataException, "supplier argument must be of type supplier");
    return NULL;
  }
  Item *item = static_cast<Item*>(item_tmp.getObject());
  Location *location = static_cast<Location*>(location_tmp.getObject());
  Supplier *supplier = pysupplier ? static_cast<Supplier*>(supplier_tmp.getObject()) : NULL;

  // Find or create the destination buffer.
  Buffer* destbuffer = NULL;
  Item::bufferIterator buf_iter(item);
  while (Buffer* tmpbuf = buf_iter.next())
  {
    if (tmpbuf->getLocation() == location)
    {
      if (destbuffer)
      {
        stringstream o;
        o << "Multiple buffers found for item '" << item << "'' and location'" << location << "'";
        throw DataException(o.str());
      }
      destbuffer = tmpbuf;
    }
  }
  if (!destbuffer)
  {
    // Create the destination buffer
    destbuffer = new BufferDefault();
    stringstream o;
    o << item << " @ " << location;
    destbuffer->setName(o.str());
    destbuffer->setItem(item);
    destbuffer->setLocation(location);
  }

  // Build the producing operation for this buffer.
  destbuffer->getProducingOperation();

  // Look for a matching operation replenishing this buffer.
  Operation *oper = NULL;
  for (Buffer::flowlist::const_iterator flowiter = destbuffer->getFlows().begin();
    flowiter != destbuffer->getFlows().end() && !oper; ++flowiter)
  {
    if (flowiter->getOperation()->getType() != *OperationItemSupplier::metadata)
      continue;
    OperationItemSupplier* opitemsupplier = static_cast<OperationItemSupplier*>(flowiter->getOperation());
    if (supplier)
    {
      if (supplier->isMemberOf(opitemsupplier->getItemSupplier()->getSupplier()))
        oper = opitemsupplier;
    }
    else
      oper = opitemsupplier;
  }

  // No matching operation is found.
  if (!oper)
  {
    // We'll create one now, but that requires that we have a supplier defined.
    if (!supplier)
      throw DataException("Supplier is needed on this purchase order");
    // Note: We know that we need to create a new one. An existing one would
    // have created an operation on the buffer already.
    ItemSupplier *itemsupplier = new ItemSupplier();
    itemsupplier->setSupplier(supplier);
    itemsupplier->setItem(item);
    itemsupplier->setLocation(location);
    oper = new OperationItemSupplier(itemsupplier, destbuffer);
    new ProblemInvalidData(oper, "Purchase orders on unauthorized supplier", "operation",
      Date::infinitePast, Date::infiniteFuture, 1);
  }

  // Finally, create the operationplan
  OperationPlan *opplan = oper->createOperationPlan(qty, start, end);
  if (id)
    opplan->setRawIdentifier(id);  // We can use this fast method because we call activate later
  if (status)
    opplan->setStatus(status);
  // Reset quantity after the status update to assure that
  // also non-valid quantities are getting accepted.
  opplan->setQuantity(qty);
  if (ref)
    opplan->setReference(ref);
  opplan->activate();

  // Return result
  Py_INCREF(opplan);
  return opplan;
}
Example #11
PyObject* OperationPlan::updateFeasiblePython(PyObject* self, PyObject* args)
{
  return PythonData(static_cast<OperationPlan*>(self)->updateFeasible());
}
Example #12
extern "C" PyObject* OperationItemSupplier::createOrder(
  PyObject *self, PyObject *args, PyObject *kwdict
  )
{
  // Parse the Python arguments
  PyObject* pylocation = NULL;
  unsigned long id = 0;
  const char* ref = NULL;
  PyObject* pyitem = NULL;
  PyObject* pysupplier = NULL;
  double qty = 0;
  PyObject* pystart = NULL;
  PyObject* pyend = NULL;
  const char* status = NULL;
  const char* source = NULL;
  static const char *kwlist[] = {
    "location", "id", "reference", "item", "supplier", "quantity", "start",
    "end", "status", "source", NULL
    };
  int ok = PyArg_ParseTupleAndKeywords(
    args, kwdict, "|OkzOOdOOzz:createOrder", const_cast<char**>(kwlist),
    &pylocation, &id, &ref, &pyitem, &pysupplier, &qty, &pystart,
    &pyend, &status, &source
    );
  if (!ok)
    return NULL;
  Date start = pystart ? PythonData(pystart).getDate() : Date::infinitePast;
  Date end = pyend ? PythonData(pyend).getDate() : Date::infinitePast;

  // Validate all arguments
  if (!pylocation || !pyitem)
  {
    PyErr_SetString(PythonDataException, "item and location arguments are mandatory");
    return NULL;
  }
  PythonData location_tmp(pylocation);
  if (!location_tmp.check(Location::metadata))
  {
    PyErr_SetString(PythonDataException, "location argument must be of type location");
    return NULL;
  }
  PythonData item_tmp(pyitem);
  if (!item_tmp.check(Item::metadata))
  {
    PyErr_SetString(PythonDataException, "item argument must be of type item");
    return NULL;
  }
  PythonData supplier_tmp(pysupplier);
  if (pysupplier && !supplier_tmp.check(Supplier::metadata))
  {
    PyErr_SetString(PythonDataException, "supplier argument must be of type supplier");
    return NULL;
  }
  Item *item = static_cast<Item*>(item_tmp.getObject());
  Location *location = static_cast<Location*>(location_tmp.getObject());
  Supplier *supplier = pysupplier ? static_cast<Supplier*>(supplier_tmp.getObject()) : NULL;

  // Find or create the destination buffer.
  Buffer* destbuffer = NULL;
  for (Buffer::iterator bufiter = Buffer::begin(); bufiter != Buffer::end(); ++bufiter)
  {
    if (bufiter->getLocation() == location && bufiter->getItem() == item)
    {
      if (destbuffer)
      {
        stringstream o;
        o << "Multiple buffers found for item '" << item << "'' and location'" << location << "'";
        throw DataException(o.str());
      }
      destbuffer = &*bufiter;
    }
  }
  if (!destbuffer)
  {
    // Create the destination buffer
    destbuffer = new BufferDefault();
    stringstream o;
    o << item << " @ " << location;
    destbuffer->setName(o.str());
    destbuffer->setItem(item);
    destbuffer->setLocation(location);
  }

  // Look for a matching supplying operation on this buffer.
  // Here we also trigger the creation of its producing operation, which
  // contains the logic to build possible transfer operations.
  Operation *oper = NULL;
  Operation* prodOper = destbuffer->getProducingOperation();
  if (prodOper && prodOper->getType() == *OperationItemSupplier::metadata)
  {
    if (supplier)
    {
      if (supplier->isMemberOf(static_cast<OperationItemSupplier*>(prodOper)->getItemSupplier()->getSupplier()))
        oper = prodOper;
    }
    else
      oper = prodOper;
  }
  else if (prodOper && prodOper->getType() == *OperationAlternate::metadata)
  {
    SubOperation::iterator soperiter = prodOper->getSubOperationIterator();
    while (SubOperation *soper = soperiter.next())
    {
      if (soper->getType() == *OperationItemSupplier::metadata)
      {
        if (supplier)
        {
          if (supplier->isMemberOf(static_cast<OperationItemSupplier*>(prodOper)->getItemSupplier()->getSupplier()))
          {
            oper = soper->getOperation();
            break;
          }
        }
        else
        {
          oper = prodOper;
          break;
        }
      }
    }
  }

  // No matching operation is found.
  if (!oper)
  {
    // We'll create one now, but that requires that we have a supplier defined.
    if (!supplier)
      throw DataException("Supplier is needed on this purchase order");
    // Note: We know that we need to create a new one. An existing one would
    // have created an operation on the buffer already.
    ItemSupplier *itemsupplier = new ItemSupplier();
    itemsupplier->setSupplier(supplier);
    itemsupplier->setItem(item);
    itemsupplier->setLocation(location);
    oper = new OperationItemSupplier(itemsupplier, destbuffer);
    new ProblemInvalidData(oper, "Purchase orders on unauthorized supplier", "operation",
      Date::infinitePast, Date::infiniteFuture, 1);
  }

  // Finally, create the operationplan
  OperationPlan *opplan = oper->createOperationPlan(qty, start, end);
  if (status)
    opplan->setStatus(status);
  if (ref)
    opplan->setReference(ref);

  // Return result
  Py_INCREF(opplan);
  return opplan;
}
Example #13
extern "C" PyObject* OperationItemDistribution::createOrder(
  PyObject *self, PyObject *args, PyObject *kwdict
  )
{
  // Parse the Python arguments
  PyObject* pydest = NULL;
  unsigned long id = 0;
  const char* ref = NULL;
  PyObject* pyitem = NULL;
  PyObject* pyorigin = NULL;
  double qty = 0;
  PyObject* pystart = NULL;
  PyObject* pyend = NULL;
  int consume = 1;
  const char* status = NULL;
  const char* source = NULL;
  static const char *kwlist[] = {
    "destination", "id", "reference", "item", "origin", "quantity", "start",
    "end", "consume_material", "status", "source", NULL
    };
  int ok = PyArg_ParseTupleAndKeywords(
    args, kwdict, "|OkzOOdOOpzz:createOrder", const_cast<char**>(kwlist),
    &pydest, &id, &ref, &pyitem, &pyorigin, &qty, &pystart, &pyend,
    &consume, &status, &source
    );
  if (!ok)
    return NULL;
  Date start = pystart ? PythonData(pystart).getDate() : Date::infinitePast;
  Date end = pyend ? PythonData(pyend).getDate() : Date::infinitePast;

  // Validate all arguments
  if (!pydest || !pyitem)
  {
    PyErr_SetString(PythonDataException, "item and destination arguments are mandatory");
    return NULL;
  }
  PythonData dest_tmp(pydest);
  if (!dest_tmp.check(Location::metadata))
  {
    PyErr_SetString(PythonDataException, "destination argument must be of type location");
    return NULL;
  }
  PythonData item_tmp(pyitem);
  if (!item_tmp.check(Item::metadata))
  {
    PyErr_SetString(PythonDataException, "item argument must be of type item");
    return NULL;
  }
  PythonData origin_tmp(pyorigin);
  if (pyorigin && !origin_tmp.check(Location::metadata))
  {
    PyErr_SetString(PythonDataException, "origin argument must be of type location");
    return NULL;
  }
  Item *item = static_cast<Item*>(item_tmp.getObject());
  Location *dest = static_cast<Location*>(dest_tmp.getObject());
  Location *origin = pyorigin ? static_cast<Location*>(origin_tmp.getObject()) : NULL;

  // Find or create the destination buffer.
  Buffer* destbuffer = NULL;
  for (Buffer::iterator bufiter = Buffer::begin(); bufiter != Buffer::end(); ++bufiter)
  {
    if (bufiter->getLocation() == dest && bufiter->getItem() == item)
    {
      if (destbuffer)
      {
        stringstream o;
        o << "Multiple buffers found for item '" << item << "'' and location'" << dest << "'";
        throw DataException(o.str());
      }
      destbuffer = &*bufiter;
    }
  }
  if (!destbuffer)
  {
    // Create the destination buffer
    destbuffer = new BufferDefault();
    stringstream o;
    o << item << " @ " << dest;
    destbuffer->setName(o.str());
    destbuffer->setItem(item);
    destbuffer->setLocation(dest);
  }

  // Build the producing operation for this buffer.
  destbuffer->getProducingOperation();

  // Look for a matching operation replenishing this buffer.
  Operation *oper = NULL;
  for (Buffer::flowlist::const_iterator flowiter = destbuffer->getFlows().begin();
    flowiter != destbuffer->getFlows().end() && !oper; ++flowiter)
  {
    if (flowiter->getOperation()->getType() != *OperationItemDistribution::metadata
      || flowiter->getQuantity() <= 0)
        continue;
    OperationItemDistribution* opitemdist = static_cast<OperationItemDistribution*>(flowiter->getOperation());
    if (origin)
    {
      // Origin must match as well
      for (Operation::flowlist::const_iterator fl = opitemdist->getFlows().begin();
          fl != opitemdist->getFlows().end(); ++ fl)
      {
        if (fl->getQuantity() < 0 && fl->getBuffer()->getLocation()->isMemberOf(origin))
          oper = opitemdist;
      }
    }
    else
      oper = opitemdist;
  }

  // No matching operation is found.
  if (!oper)
  {
    // We'll create one now, but that requires that we have an origin defined.
    if (!origin)
      throw DataException("Origin location is needed on this distribution order");
    Buffer* originbuffer = NULL;
    for (Buffer::iterator bufiter = Buffer::begin(); bufiter != Buffer::end(); ++bufiter)
    {
      if (bufiter->getLocation() == origin && bufiter->getItem() == item)
      {
        if (originbuffer)
        {
          stringstream o;
          o << "Multiple buffers found for item '" << item << "'' and location'" << dest << "'";
          throw DataException(o.str());
        }
        originbuffer = &*bufiter;
      }
    }
    if (!originbuffer)
    {
      // Create the origin buffer
      originbuffer = new BufferDefault();
      stringstream o;
      o << item << " @ " << origin;
      originbuffer->setName(o.str());
      originbuffer->setItem(item);
      originbuffer->setLocation(origin);
    }
    // Note: We know that we need to create a new one. An existing one would
    // have created an operation on the buffer already.
    ItemDistribution *itemdist = new ItemDistribution();
    itemdist->setOrigin(origin);
    itemdist->setItem(item);
    itemdist->setDestination(dest);
    oper = new OperationItemDistribution(itemdist, originbuffer, destbuffer);
    new ProblemInvalidData(oper, "Distribution orders on unauthorized lanes", "operation",
      Date::infinitePast, Date::infiniteFuture, 1);
  }

  // Finally, create the operationplan
  OperationPlan *opplan = oper->createOperationPlan(qty, start, end, NULL, NULL, 0, false);
  if (id)
    opplan->setIdentifier(id);
  if (status)
    opplan->setStatus(status);
  if (ref)
    opplan->setReference(ref);
  if (!consume)
    opplan->setConsumeMaterial(false);
  opplan->activate();

  // Return result
  Py_INCREF(opplan);
  return opplan;
}
Example #14
/** @todo The flow quantity is handled at the wrong place. It needs to be
  * handled by the operation, since flows can exist on multiple suboperations
  * with different quantities. The buffer solve can't handle this, because
  * it only calls the solve() for the producing operation...
  * Are there situations where the operation solver doesn't know enough
  * about the buffer behavior?
  */
void SolverMRP::solve(const Buffer* b, void* v)
{
  // Call the user exit
  SolverMRPdata* data = static_cast<SolverMRPdata*>(v);
  if (userexit_buffer) userexit_buffer.call(b, PythonData(data->constrainedPlanning));

  // Verify the iteration limit isn't exceeded.
  if (data->getSolver()->getIterationMax()
    && ++data->iteration_count > data->getSolver()->getIterationMax())
  {
    ostringstream ch;
    ch << "Maximum iteration count " << data->getSolver()->getIterationMax() << " exceeded";
    throw RuntimeException(ch.str());
  }

  // Safety stock planning is refactored to a separate method
  double requested_qty(data->state->q_qty);
  if (requested_qty == -1.0)
  {
    solveSafetyStock(b,v);
    return;
  }
  Date requested_date(data->state->q_date);
  bool tried_requested_date(false);

  // Message
  if (data->getSolver()->getLogLevel()>1)
    logger << indent(b->getLevel()) << "  Buffer '" << b->getName()
      << "' is asked: " << data->state->q_qty << "  " << data->state->q_date << endl;

  // Store the last command in the list, in order to undo the following
  // commands if required.
  CommandManager::Bookmark* topcommand = data->setBookmark();
  OperationPlan *prev_owner_opplan = data->state->curOwnerOpplan;

  // Evaluate the buffer profile and solve shortages by asking more material.
  // The loop goes from the requested date till the very end. Whenever the
  // event date changes, we evaluate if a shortage exists.
  Date currentDate;
  const TimeLine<FlowPlan>::Event *prev = nullptr;
  double shortage(0.0);
  Date extraSupplyDate(Date::infiniteFuture);
  Date extraInventoryDate(Date::infiniteFuture);
  double cumproduced = (b->getFlowPlans().rbegin() == b->getFlowPlans().end())
    ? 0
    : b->getFlowPlans().rbegin()->getCumulativeProduced();
  double current_minimum(0.0);
  double unconfirmed_supply(0.0);
  for (Buffer::flowplanlist::const_iterator cur=b->getFlowPlans().begin();
      ; ++cur)
  {
    if (&*cur && cur->getEventType() == 1)
    {
      const FlowPlan* fplan = static_cast<const FlowPlan*>(&*cur);
      if (!fplan->getOperationPlan()->getRawIdentifier()
        && fplan->getQuantity()>0
        && fplan->getOperationPlan()->getOperation() != b->getProducingOperation())
          unconfirmed_supply += fplan->getQuantity();
    }

    // Iterator has now changed to a new date or we have arrived at the end.
    // If multiple flows are at the same moment in time, we are not interested
    // in the inventory changes. It gets interesting only when a certain
    // inventory level remains unchanged for a certain time.
    if ((cur == b->getFlowPlans().end() || cur->getDate()>currentDate) && prev)
    {
      // Some variables
      Date theDate = prev->getDate();
      double theOnHand = prev->getOnhand();
      double theDelta = theOnHand - current_minimum + shortage;

      // Evaluate the situation at the last flowplan before the date change.
      // Is there a shortage at that date?
      // We have 3 ways to resolve it:
      //  - Scan backward for a producer we can combine with to make a
      //    single batch.
      //  - Scan forward for producer we can replace in a single batch.
      //  - Create new supply for the shortage at that date.

      // Solution one: we scan backward in time for producers we can merge with.
      if (theDelta < -ROUNDING_ERROR
        && b->getMinimumInterval() >= 0L
        && prev
        && prev->getDate() >= theDate - b->getMinimumInterval())
      {
        Operation *prevOper = nullptr;
        DateRange prevDates;
        double prevQty = 0.0;
        Buffer::flowplanlist::const_iterator prevbatchiter = b->getFlowPlans().end();
        for (Buffer::flowplanlist::const_iterator batchiter = prev;
          batchiter != b->getFlowPlans().end() && batchiter->getDate() >= theDate - b->getMinimumInterval();
          prevbatchiter = batchiter--)
        {
          // Check if it is an unlocked producing operationplan
          if (batchiter->getQuantity() <= 0) continue;
          const FlowPlan* batchcandidate = nullptr;
          if (batchiter->getEventType() == 1)
            batchcandidate = static_cast<const FlowPlan*>(&*batchiter);
          if (!batchcandidate || batchcandidate->getOperationPlan()->getLocked())
            continue;

          // Store date and quantity of the candidate
          Date batchdate = batchcandidate->getDate();
          double batchqty = batchcandidate->getOperationPlan()->getTotalFlow(b) - theDelta;
          double consumed_in_window = b->getFlowPlans().getFlow(batchcandidate, b->getMinimumInterval(), true);
          if (batchqty > consumed_in_window)
            batchqty = consumed_in_window;
          Operation* candidate_operation = batchcandidate->getOperationPlan()->getOperation();
          DateRange candidate_dates = batchcandidate->getOperationPlan()->getDates();
          double candidate_qty = batchcandidate->getOperationPlan()->getQuantity();

          // Verify we haven't tried the same kind of candidate before
          if (candidate_operation == prevOper
            && candidate_dates == prevDates
            && fabs(candidate_qty - prevQty) < ROUNDING_ERROR)
              continue;
          prevOper = candidate_operation;
          prevDates = candidate_dates;
          prevQty = candidate_qty;

          // Delete existing producer, and propagate the deletion upstream
          CommandManager::Bookmark* batchbookmark = data->setBookmark();
          data->operator_delete->solve(batchcandidate->getOperationPlan());

          // Create new producer
          short loglevel = data->getSolver()->getLogLevel();
          try
          {
            data->getSolver()->setLogLevel(0);
            data->state->curBuffer = const_cast<Buffer*>(b);
            data->state->q_qty = batchqty;
            // We need to add the post-operation time, because the operation
            // solver will subtract it again. For merging operationplans we
            // want to plan *exactly* at the date of the existing operationplan.
            data->state->q_date =
              batchdate + b->getProducingOperation()->getPostTime();
            data->state->curOwnerOpplan = nullptr;
            b->getProducingOperation()->solve(*this, v);
          }
          catch (...)
          {
            data->getSolver()->setLogLevel(loglevel);
            throw;
          }
          data->getSolver()->setLogLevel(loglevel);

          // Check results
          if (data->state->a_qty < batchqty - ROUNDING_ERROR)
          {
            // It didn't work.
            if (loglevel > 1)
              logger << indent(b->getLevel())
                << "  Rejected resized batch '" << candidate_operation
                << "' " << candidate_dates << " " << candidate_qty << endl;
            data->rollback(batchbookmark);
            // Assure batchiter remains valid
            batchiter = prevbatchiter;
            if (batchiter != b->getFlowPlans().end())
              --batchiter;
          }
          else
          {
            // It worked.
            if (loglevel > 1)
              logger << indent(b->getLevel())
                << "  Accepting resized batch '" << candidate_operation
                << "' " << candidate_dates << " " << candidate_qty << endl;

            theDelta = 0.0;
            break;
          }
          // Assure the prev pointer remains valid after this loop
          Buffer::flowplanlist::const_iterator c = cur;
          --c;
          if (c == b->getFlowPlans().end())
          {
            c = b->getFlowPlans().rbegin();
            if (c == b->getFlowPlans().end())
              prev = nullptr;
            else
              prev = &*c;
          }
          else
            prev = &*c;
        }
      }

      // Solution two: we scan forward in time for producers we can replace.
      if (theDelta < -ROUNDING_ERROR
        && b->getMinimumInterval() >= 0L
        && cur != b->getFlowPlans().end()
        && cur->getDate() <= theDate + b->getMinimumInterval())
      {
        Operation *prevOper = nullptr;
        DateRange prevDates;
        double prevQty = 0.0;
        Buffer::flowplanlist::const_iterator prevbatchiter = b->getFlowPlans().end();
        for (Buffer::flowplanlist::const_iterator batchiter = cur;
          batchiter != b->getFlowPlans().end() && batchiter->getDate() <= theDate + b->getMinimumInterval();
          prevbatchiter = batchiter++)
        {
          // Check if it is an unlocked producing operationplan
          if (batchiter->getQuantity() <= 0) continue;
          const FlowPlan* batchcandidate = nullptr;
          if (batchiter->getEventType() == 1)
            batchcandidate = static_cast<const FlowPlan*>(&*batchiter);
          if (!batchcandidate || batchcandidate->getOperationPlan()->getLocked())
            continue;

          // Store date and quantity of the candidate
          double batchqty = batchcandidate->getQuantity()- theDelta;
          double consumed_in_window = b->getFlowPlans().getFlow(prev, b->getMinimumInterval(), true);
          if (batchqty > consumed_in_window)
            batchqty = consumed_in_window;
          Operation* candidate_operation = batchcandidate->getOperationPlan()->getOperation();
          DateRange candidate_dates = batchcandidate->getOperationPlan()->getDates();
          double candidate_qty = batchcandidate->getOperationPlan()->getQuantity();

          // Verify we haven't tried the same kind of candidate before
          if (candidate_operation == prevOper
            && prevDates == candidate_dates
            && fabs(candidate_qty - prevQty) < ROUNDING_ERROR)
              continue;
          prevOper = candidate_operation;
          prevDates = candidate_dates;
          prevQty = candidate_qty;

          // Delete existing producer, and propagate the deletion upstream
          CommandManager::Bookmark* batchbookmark = data->setBookmark();
          data->operator_delete->solve(batchcandidate->getOperationPlan());

          // Create new producer
          short loglevel = data->getSolver()->getLogLevel();
          try
          {
            data->getSolver()->setLogLevel(0);
            data->state->curBuffer = const_cast<Buffer*>(b);
            data->state->q_qty = batchqty;
            data->state->q_date = theDate;
            data->state->curOwnerOpplan = nullptr;
            b->getProducingOperation()->solve(*this, v);
          }
          catch (...)
          {
            data->getSolver()->setLogLevel(loglevel);
            throw;
          }
          data->getSolver()->setLogLevel(loglevel);

          // Check results
          if (data->state->a_qty < batchqty - ROUNDING_ERROR)
          {
            // It didn't work.
            if (loglevel > 1)
              logger << indent(b->getLevel())
                << "  Rejected joining batch with '" << candidate_operation
                << "' " << candidate_dates << " " << candidate_qty << endl;
            data->rollback(batchbookmark);
            // Assure batchiter remains valid
            batchiter = prevbatchiter;
            if (batchiter != b->getFlowPlans().end())
              ++batchiter;
          }
          else
          {
            // It worked.
            if (loglevel > 1)
              logger << indent(b->getLevel())
                << "  Accepted joining batch with '" << candidate_operation
                << "' " << candidate_dates << " " << candidate_qty << endl;
            theDelta = 0.0;
            // Assure the cur iterator remains valid after this loop
            cur = prev;
            if (cur != b->getFlowPlans().end())
              ++cur;
            break;
          }
          // Assure the cur iterator remains valid after this loop
          cur = prev;
          if (cur != b->getFlowPlans().end())
            ++cur;
        }
      }

      // Solution three: create supply at the shortage date itself
      if (theDelta < -ROUNDING_ERROR)
      {
        // Can we get extra supply to solve the problem, or part of it?
        // If the shortage already starts before the requested date, it
        // was not created by the newly added flowplan, but existed before.
        // We don't consider this as a shortage for the current flowplan,
        // and we want our flowplan to try to repair the previous problems
        // if it can...
        bool loop = true;
        while (b->getProducingOperation() && theDate >= requested_date && loop)
        {
          // Create supply
          data->state->curBuffer = const_cast<Buffer*>(b);
          data->state->q_qty = -theDelta;
          data->state->q_date = theDate;

          // Check whether this date doesn't match with the requested date.
          // See a bit further why this is required.
          if (data->state->q_date == requested_date) tried_requested_date = true;

          // Make sure the new operationplans don't inherit an owner.
          // When an operation calls the solve method of suboperations, this field is
          // used to pass the information about the owner operationplan down. When
          // solving for buffers we must make sure NOT to pass owner information.
          // At the end of solving for a buffer we need to restore the original
          // settings...
          data->state->curOwnerOpplan = nullptr;

          // Note that the supply created with the next line changes the
          // onhand value at all later dates!
          b->getProducingOperation()->solve(*this,v);

          // Evaluate the reply date. The variable extraSupplyDate will store
          // the date when the producing operation tells us it can get extra
          // supply.
          if (data->state->a_date < extraSupplyDate
              && data->state->a_date > requested_date)
            extraSupplyDate = data->state->a_date;

          // If we got some extra supply, we retry to get some more supply.
          // Only when no extra material is obtained, we give up.
          // When solving for safety stock or when the parameter allowsplit is
          // set to false we need to get a single replenishing operationplan.
          if (data->state->a_qty > ROUNDING_ERROR
              && data->state->a_qty < -theDelta - ROUNDING_ERROR
              && ((data->getSolver()->getAllowSplits() && !data->safety_stock_planning)
               || data->state->a_qty == b->getProducingOperation()->getSizeMaximum())
             )
            theDelta += data->state->a_qty;
          else
            loop = false;
        }

        // Not enough supply was received to repair the complete problem
        if (prev && prev->getOnhand() + shortage < -ROUNDING_ERROR)
        {
          // Keep track of the shorted quantity.
          // Only consider shortages later than the requested date.
          if (theDate >= requested_date)
            shortage = -prev->getOnhand();

          // Reset the date from which excess material is in the buffer. This
          // excess material can be used to compute the date when the buffer
          // can be asked again for additional supply.
          extraInventoryDate = Date::infiniteFuture;
        }
      }
      else if (theDelta > unconfirmed_supply + ROUNDING_ERROR)
        // There is excess material at this date (coming from planned/frozen
        // material arrivals, surplus material created by lotsized operations,
        // etc...)
        // The unconfirmed_supply element is required to exclude any of the
        // excess inventory we may have caused ourselves. Such situations are
        // possible when there are loops in the supply chain.
        if (theDate > requested_date
            && extraInventoryDate == Date::infiniteFuture)
          extraInventoryDate = theDate;
    }

    // We have reached the end of the flowplans. Breaking out of the loop
    // needs to be done here because in the next statements we are accessing
    // *cur, which isn't valid at the end of the list
    if (cur == b->getFlowPlans().end()) break;

    // The minimum has changed.
    // Note that these limits can be updated only after the processing of the
    // date change in the statement above. Otherwise the code above would
    // already use the new value before the intended date.
    // If the flag getPlanSafetyStockFirst is set, then we need to replenish
    // up to the minimum quantity. If it is not set (which is the default) then
    // we only replenish up to 0.
    if (cur->getEventType() == 3 && (getPlanSafetyStockFirst() || data->safety_stock_planning))
      current_minimum = cur->getMin();

    // Update the pointer to the previous flowplan.
    prev = &*cur;
    currentDate = cur->getDate();
  }

  // Note: the variable extraInventoryDate now stores the date from which
  // excess material is available in the buffer. We don't need to care how
  // much material is lying there.

  // Check for supply at the requested date
  // Isn't this included in the normal loop? In some cases it is indeed, but
  // sometimes it isn't because in the normal loop there may still have been
  // onhand available and the shortage only shows at a later date than the
  // requested date.
  // E.g. Initial situation:              After extra consumer at time y:
  //      -------+                                --+
  //             |                                  |
  //             +------                            +---+
  //                                                    |
  //    0 -------y------                        0 --y---x-----
  //                                                    |
  //                                                    +-----
  // The first loop only checks for supply at times x and later. If it is not
  // feasible, we now check for supply at time y. It will create some extra
  // inventory, but at least the demand is met.
  // @todo The buffer solver could move backward in time from x till time y,
  // and try multiple dates. This would minimize the excess inventory created.
  while (shortage > ROUNDING_ERROR
      && b->getProducingOperation() && !tried_requested_date)
  {
    // Create supply at the requested date
    data->state->curBuffer = const_cast<Buffer*>(b);
    data->state->q_qty = shortage;
    data->state->q_date = requested_date;
    // Make sure the new operationplans don't inherit an owner.
    // When an operation calls the solve method of suboperations, this field is
    // used to pass the information about the owner operationplan down. When
    // solving for buffers we must make sure NOT to pass owner information.
    // At the end of solving for a buffer we need to restore the original
    // settings...
    data->state->curOwnerOpplan = nullptr;
    // Note that the supply created with the next line changes the onhand value
    // at all later dates!
    // Note that asking at the requested date doesn't keep the material on
    // stock to a minimum.
    if (requested_qty - shortage < ROUNDING_ERROR)
      data->rollback(topcommand);
    b->getProducingOperation()->solve(*this,v);
    // Evaluate the reply
    if (data->state->a_date < extraSupplyDate
        && data->state->a_date > requested_date)
      extraSupplyDate = data->state->a_date;
    if (data->state->a_qty > ROUNDING_ERROR)
      shortage -= data->state->a_qty;
    else
      tried_requested_date = true;
  }

  // Final evaluation of the replenishment
  if (data->constrainedPlanning && data->getSolver()->isConstrained())
  {
    // Use the constrained planning result
    data->state->a_qty = requested_qty - shortage;
    if (data->state->a_qty < ROUNDING_ERROR)
    {
      data->rollback(topcommand);
      data->state->a_qty = 0.0;
    }
    data->state->a_date = (extraInventoryDate < extraSupplyDate) ?
        extraInventoryDate :
        extraSupplyDate;
    // Monitor as a constraint if there is no producing operation.
    // Note that if there is a producing operation the constraint is flagged
    // on the operation instead of on this buffer.
    if (!b->getProducingOperation() && data->logConstraints && shortage > ROUNDING_ERROR && data->planningDemand)
      data->planningDemand->getConstraints().push(ProblemMaterialShortage::metadata,
          b, requested_date, Date::infiniteFuture, shortage);
  }
  else
  {
    // Enough inventory or supply available, or not material constrained.
    // In case of a plan that is not material constrained, the buffer tries to
    // solve for shortages as well as possible. Only at the end do we 'lie' about
    // the result to the calling function. Material shortages will then remain
    // in the buffer.
    data->state->a_qty = requested_qty;
    data->state->a_date = Date::infiniteFuture;
  }

  // Restore the owning operationplan.
  data->state->curOwnerOpplan = prev_owner_opplan;

  // Reply quantity must not be negative
  assert( data->state->a_qty >= 0 );

  // Increment the cost
  // Only the quantity consumed directly from the buffer is counted.
  // The cost of the material supply taken from producing operations is
  // computed separately and not considered here.
  if (b->getItem() && data->state->a_qty > 0)
  {
    if (b->getFlowPlans().empty())
      cumproduced = 0.0;
    else
      cumproduced = b->getFlowPlans().rbegin()->getCumulativeProduced() - cumproduced;
    if (data->state->a_qty > cumproduced)
      data->state->a_cost += (data->state->a_qty - cumproduced) * b->getItem()->getPrice();
  }

  // Message
  if (data->getSolver()->getLogLevel()>1)
    logger << indent(b->getLevel()) << "  Buffer '" << b->getName()
        << "' answers: " << data->state->a_qty << "  " << data->state->a_date << "  "
        << data->state->a_cost << "  " << data->state->a_penalty << endl;
}
Example #15
DECLARE_EXPORT void SolverMRP::solve(const Demand* l, void* v)
{
  // Set a bookmark at the current command
  SolverMRPdata* data = static_cast<SolverMRPdata*>(v);
  CommandManager::Bookmark* topcommand = data->setBookmark();

  // Create a state stack
  State* mystate = data->state;
  data->push();

  try
  {
    // Call the user exit
    if (userexit_demand) userexit_demand.call(l, PythonData(data->constrainedPlanning));
    short loglevel = data->getSolver()->getLogLevel();

    // Note: This solver method does not push/pop states on the stack.
    // We continue to work on the top element of the stack.

    // Message
    if (loglevel>0)
    {
      logger << "Planning demand '" << l->getName() << "' (" << l->getPriority()
          << ", " << l->getDue() << ", " << l->getQuantity() << ")";
      if (!data->constrainedPlanning || !data->getSolver()->isConstrained())
        logger << " in unconstrained mode";
      logger << endl;
    }

    // Unattach previous delivery operationplans, if required.
    if (data->getSolver()->getErasePreviousFirst())
    {
      // Locked operationplans will NOT be deleted, and a part of the demand can
      // still remain planned.
      const_cast<Demand*>(l)->deleteOperationPlans(false, data);

      // Empty constraint list
      const_cast<Demand*>(l)->getConstraints().clear();
    }

    // Track constraints or not
    data->logConstraints = (getPlanType() == 1);

    // Determine the quantity to be planned and the date for the planning loop
    double plan_qty = l->getQuantity() - l->getPlannedQuantity();
    Date plan_date = l->getDue();
    if (plan_qty < ROUNDING_ERROR || plan_date == Date::infiniteFuture)
    {
      if (loglevel>0) logger << "  Nothing to be planned." << endl;
      data->pop();
      return;
    }
    if (plan_qty < l->getMinShipment()) plan_qty = l->getMinShipment();

    // Temporary values to store the 'best-reply' so far
    double best_q_qty = 0.0, best_a_qty = 0.0;
    Date best_q_date;

    // Select delivery operation
    Operation* deliveryoper = l->getDeliveryOperation();

    // Handle invalid or missing delivery operations
    {
    string problemtext = string("Demand '") + l->getName() + "' has no delivery operation";
    Problem::const_iterator j = Problem::begin(const_cast<Demand*>(l), false);
    while (j!=Problem::end())
    {
      if (&(j->getType()) == ProblemInvalidData::metadata
          && j->getDescription() == problemtext)
        break;
      ++j;
    }
    if (!deliveryoper)
    {
      // Create a problem
      if (j == Problem::end())
        new ProblemInvalidData(const_cast<Demand*>(l), problemtext, "demand",
            l->getDue(), l->getDue(), l->getQuantity());
      // Abort planning of this demand
      throw DataException("Demand '" + l->getName() + "' can't be planned");
    }
    else if (j != Problem::end())
      // Remove problem that may already exist
      delete &*j;
    }

    // Planning loop
    do
    {
      // Message
      if (loglevel>0)
        logger << "Demand '" << l << "' asks: "
            << plan_qty << "  " << plan_date << endl;

      // Store the last command in the list, in order to undo the following
      // commands if required.
      CommandManager::Bookmark* topcommand = data->setBookmark();

      // Plan the demand by asking the delivery operation to plan
      double q_qty = plan_qty;
      data->state->curBuffer = NULL;
      data->state->q_qty = plan_qty;
      data->state->q_date = plan_date;
      data->planningDemand = const_cast<Demand*>(l);
      data->state->curDemand = const_cast<Demand*>(l);
      data->state->curOwnerOpplan = NULL;
      deliveryoper->solve(*this,v);
      Date next_date = data->state->a_date;

      if (data->state->a_qty < ROUNDING_ERROR
          && plan_qty > l->getMinShipment() && l->getMinShipment() > 0)
      {
        bool originalLogConstraints = data->logConstraints;
        data->logConstraints = false;
        try
        {
          // The full asked quantity is not possible.
          // Try with the minimum shipment quantity.
          if (loglevel>1)
            logger << "Demand '" << l << "' tries planning minimum quantity " << l->getMinShipment() << endl;
          data->rollback(topcommand);
          data->state->curBuffer = NULL;
          data->state->q_qty = l->getMinShipment();
          data->state->q_date = plan_date;
          data->state->curDemand = const_cast<Demand*>(l);
          deliveryoper->solve(*this,v);
          if (data->state->a_date < next_date)
            next_date = data->state->a_date;
          if (data->state->a_qty > ROUNDING_ERROR)
          {
            // The minimum shipment quantity is feasible.
            // Now try iteratively different quantities to find the best we can do.
            double min_qty = l->getMinShipment();
            double max_qty = plan_qty;
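            // Search invariant: min_qty has just been proven feasible, while
            // max_qty (the original ask) is known to fail. Bisect until the
            // interval is within the solver's accuracy and threshold.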
            double delta = fabs(max_qty - min_qty);
            while (delta > data->getSolver()->getIterationAccuracy() * l->getQuantity()
                && delta > data->getSolver()->getIterationThreshold())
            {
              // Note: the rounding below assumes the demand quantity is effectively an integer.
              double new_qty = floor((min_qty + max_qty) / 2);
              if (new_qty == min_qty)
              {
                // Required to avoid an infinite loop on the same value...
                new_qty += 1;
                if (new_qty > max_qty) break;
              }
              if (loglevel>0)
                logger << "Demand '" << l << "' tries planning a different quantity " << new_qty << endl;
              data->rollback(topcommand);
              data->state->curBuffer = NULL;
              data->state->q_qty = new_qty;
              data->state->q_date = plan_date;
              data->state->curDemand = const_cast<Demand*>(l);
              deliveryoper->solve(*this,v);
              if (data->state->a_date < next_date)
                next_date = data->state->a_date;
              if (data->state->a_qty > ROUNDING_ERROR)
                // Too small: new min
                min_qty = new_qty;
              else
                // Too big: new max
                max_qty = new_qty;
              delta = fabs(max_qty - min_qty);
            }
            q_qty = min_qty;  // the largest quantity that got a positive reply
            if (data->state->a_qty <= ROUNDING_ERROR)
            {
              if (loglevel>0)
                logger << "Demand '" << l << "' restores plan for quantity " << min_qty << endl;
              // Restore the last feasible plan
              data->rollback(topcommand);
              data->state->curBuffer = NULL;
              data->state->q_qty = min_qty;
              data->state->q_date = plan_date;
              data->state->curDemand = const_cast<Demand*>(l);
              deliveryoper->solve(*this,v);
            }
          }
        }
        catch (...)
        {
          data->logConstraints = originalLogConstraints;
          throw;
        }
        data->logConstraints = originalLogConstraints;
      }

      // Message
      if (loglevel>0)
        logger << "Demand '" << l << "' gets answer: "
            << data->state->a_qty << "  " << next_date << "  "
            << data->state->a_cost << "  " << data->state->a_penalty << endl;

      // Remember the date we asked on this pass; the ask date for the
      // next pass is derived from it below.
      Date copy_plan_date = plan_date;

      // Compare the planned quantity with the minimum allowed shipment quantity
      // We don't accept the answer in case:
      // 1) Nothing is planned
      // 2) The planned quantity is less than the minimum shipment quantity
      // 3) The remaining quantity after accepting this answer is less than
      //    the minimum quantity.
      if (data->state->a_qty < ROUNDING_ERROR
          || data->state->a_qty + ROUNDING_ERROR < l->getMinShipment()
          || (plan_qty - data->state->a_qty < l->getMinShipment()
              && fabs(plan_qty - data->state->a_qty) > ROUNDING_ERROR))
      {
        if (plan_qty - data->state->a_qty < l->getMinShipment()
            && data->state->a_qty + ROUNDING_ERROR >= l->getMinShipment()
            && data->state->a_qty > best_a_qty )
        {
          // The remaining quantity after accepting this answer is less than
          // the minimum quantity. Therefore, we delay accepting it now, but
          // still keep track of this best offer.
          best_a_qty = data->state->a_qty;
          best_q_qty = q_qty;
          best_q_date = plan_date;
        }

        // Delete operationplans - Undo all changes
        data->rollback(topcommand);

        // Set the ask date for the next pass through the loop
        if (next_date <= copy_plan_date
          || (!data->getSolver()->getAllowSplits() && data->state->a_qty > ROUNDING_ERROR)
          || (data->state->a_qty > ROUNDING_ERROR && plan_qty - data->state->a_qty < l->getMinShipment()
              && plan_qty - data->state->a_qty > ROUNDING_ERROR))
        {
          // Oops, we didn't get a proper answer we can use for the next loop.
          // Print a warning and simply try one day later.
          if (loglevel>0)
            logger << "Warning: Demand '" << l << "': Lazy retry" << endl;
          plan_date = copy_plan_date + data->getSolver()->getLazyDelay();
        }
        else
          // Use the next-date answer from the solver
          plan_date = next_date;
      }
      else
      {
        // Accepting this answer
        if (data->state->a_qty + ROUNDING_ERROR < q_qty)
        {
          // The demand was only partially planned. We need to do a new
          // 'coordinated' planning run.

          // Delete operationplans created in the 'testing round'
          data->rollback(topcommand);

          // Create the correct operationplans
          if (loglevel>=2)
            logger << "Demand '" << l << "' plans coordination." << endl;
          data->getSolver()->setLogLevel(0);
          double tmpresult = 0;
          try
          {
            for(double remainder = data->state->a_qty;
                remainder > ROUNDING_ERROR; remainder -= data->state->a_qty)
            {
              data->state->q_qty = remainder;
              data->state->q_date = copy_plan_date;
              data->state->curDemand = const_cast<Demand*>(l);
              data->state->curBuffer = NULL;
              deliveryoper->solve(*this,v);
              if (data->state->a_qty < ROUNDING_ERROR)
              {
                logger << "Warning: Demand '" << l << "': Failing coordination" << endl;
                break;
              }
              tmpresult += data->state->a_qty;
            }
          }
          catch (...)
          {
            data->getSolver()->setLogLevel(loglevel);
            throw;
          }
          data->getSolver()->setLogLevel(loglevel);
          data->state->a_qty = tmpresult;
          if (tmpresult == 0) break;
        }

        // Register the new operationplans. We need to make sure that the
        // correct execute method is called!
        if (data->getSolver()->getAutocommit())
        {
          data->getSolver()->scanExcess(data);
          data->CommandManager::commit();
        }

        // Update the quantity to plan in the next loop
        plan_qty -= data->state->a_qty;
        best_a_qty = 0.0;  // Reset the 'best-answer' tracker
      }

    }
    // Repeat while there is still a quantity left to plan and we aren't
    // exceeding the maximum delivery delay.
    while (plan_qty > ROUNDING_ERROR
        && ((data->getSolver()->getPlanType() != 2 && plan_date < l->getDue() + l->getMaxLateness())
            || (data->getSolver()->getPlanType() == 2 && !data->constrainedPlanning && plan_date < l->getDue() + l->getMaxLateness())
            || (data->getSolver()->getPlanType() == 2 && data->constrainedPlanning && plan_date == l->getDue())
           ));

    // Accept the best possible answer.
    // We may have skipped it in the previous loop, awaiting a still better answer.
    if (best_a_qty > 0.0 && data->constrainedPlanning)
    {
      if (loglevel>=2) logger << "Demand '" << l << "' accepts a best answer." << endl;
      data->getSolver()->setLogLevel(0);
      try
      {
        for (double remainder = best_q_qty;
          remainder > ROUNDING_ERROR && remainder > l->getMinShipment();
          remainder -= data->state->a_qty)
        {
          data->state->q_qty = remainder;
          data->state->q_date = best_q_date;
          data->state->curDemand = const_cast<Demand*>(l);
          data->state->curBuffer = NULL;
          deliveryoper->solve(*this,v);
          if (data->state->a_qty < ROUNDING_ERROR)
          {
            logger << "Warning: Demand '" << l << "': Failing accepting best answer" << endl;
            break;
          }
        }
        if (data->getSolver()->getAutocommit())
        {
          data->getSolver()->scanExcess(data);
          data->CommandManager::commit();
        }
      }
      catch (...)
      {
        data->getSolver()->setLogLevel(loglevel);
        throw;
      }
      data->getSolver()->setLogLevel(loglevel);
    }

    // Reset the state stack to the position we found it at.
    while (data->state > mystate) data->pop();

  }
  catch (...)
  {
    // Clean up if any exception happened during the planning of the demand
    while (data->state > mystate) data->pop();
    data->rollback(topcommand);
    throw;
  }
}
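
The quantity search in the planning loop above is a plain bisection between a shipment size that is known to plan and the full ask that is known to fail. The standalone sketch below isolates that idea; the largestFeasibleQty name, the tryPlan-style callback and the two stopping parameters are illustrative assumptions, not part of the solver API shown above.

#include <cmath>
#include <functional>
#include <iostream>

// Find the largest quantity in [min_qty, max_qty] that the feasibility
// callback accepts, mirroring the bisection in the planning loop above.
// The caller passes a lower bound that is known to be feasible and an
// upper bound that is known to be infeasible.
double largestFeasibleQty(
    double min_qty, double max_qty,
    double accuracy, double threshold,           // relative and absolute stop criteria
    const std::function<bool(double)>& tryPlan)  // assumed feasibility check
{
  const double total = max_qty;                  // stands in for the full demand quantity
  double delta = std::fabs(max_qty - min_qty);
  while (delta > accuracy * total && delta > threshold)
  {
    // Round down, assuming quantities are effectively integers.
    double new_qty = std::floor((min_qty + max_qty) / 2);
    if (new_qty == min_qty)
    {
      // Avoid looping forever on the same value.
      new_qty += 1;
      if (new_qty > max_qty) break;
    }
    if (tryPlan(new_qty))
      min_qty = new_qty;   // feasible: raise the lower bound
    else
      max_qty = new_qty;   // infeasible: lower the upper bound
    delta = std::fabs(max_qty - min_qty);
  }
  return min_qty;          // largest quantity that got a positive reply
}

int main()
{
  // Toy feasibility rule: anything up to 37 units can be planned.
  auto canPlan = [](double qty) { return qty <= 37.0; };
  std::cout << largestFeasibleQty(1.0, 100.0, 0.01, 1.0, canPlan) << std::endl;
  return 0;
}

Run against the toy rule in main, the search settles on 37, the largest quantity the rule accepts.
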
Exemplo n.º 16
0
PyObject* Flow::create(PyTypeObject* pytype, PyObject* args, PyObject* kwds)
{
  try
  {
    // Pick up the operation
    PyObject* oper = PyDict_GetItemString(kwds, "operation");
    if (!oper)
      throw DataException("missing operation on Flow");
    if (!PyObject_TypeCheck(oper, Operation::metadata->pythonClass))
      throw DataException("flow operation must be of type operation");
    else if (!static_cast<Operation*>(oper)->getLocation())
      throw DataException("operation location is unspecified");

    // Pick up the item
    PyObject* item = PyDict_GetItemString(kwds, "item");
    if (!item)
      throw DataException("missing item on Flow");
    if (!PyObject_TypeCheck(item, Item::metadata->pythonClass))
      throw DataException("flow item must be of type item");

    // Pick up the quantity
    PyObject* q1 = PyDict_GetItemString(kwds, "quantity");
    double q2 = q1 ? PythonData(q1).getDouble() : 1.0;

    // Pick up the effectivity dates
    DateRange eff;
    PyObject* eff_start = PyDict_GetItemString(kwds, "effective_start");
    if (eff_start)
    {
      PythonData d(eff_start);
      eff.setStart(d.getDate());
    }
    PyObject* eff_end = PyDict_GetItemString(kwds, "effective_end");
    if (eff_end)
    {
      PythonData d(eff_end);
      eff.setEnd(d.getDate());
    }

    // Find or create a buffer for the item at the operation location
    Buffer* buf = Buffer::findOrCreate(
      static_cast<Item*>(item),
      static_cast<Operation*>(oper)->getLocation()
      );

    // Pick up the type and create the flow
    Flow *l;
    PyObject* t = PyDict_GetItemString(kwds, "type");
    if (t)
    {
      PythonData d(t);
      if (d.getString() == "flow_end")
        l = new FlowEnd(
          static_cast<Operation*>(oper),
          static_cast<Buffer*>(buf),
          q2
        );
      else if (d.getString() == "flow_transfer_batch")
        l = new FlowTransferBatch(
          static_cast<Operation*>(oper),
          static_cast<Buffer*>(buf),
          q2
        );
      else
        l = new FlowStart(
          static_cast<Operation*>(oper),
          static_cast<Buffer*>(buf),
          q2
        );
    }
    else
      l = new FlowStart(
        static_cast<Operation*>(oper),
        static_cast<Buffer*>(buf),
        q2
      );

    // Iterate over extra keywords, and set attributes.   @todo move this responsibility to the readers...
    if (l)
    {
      l->setEffective(eff);
      PyObject *key, *value;
      Py_ssize_t pos = 0;
      while (PyDict_Next(kwds, &pos, &key, &value))
      {
        PythonData field(value);
        PyObject* key_utf8 = PyUnicode_AsUTF8String(key);
        DataKeyword attr(PyBytes_AsString(key_utf8));
        Py_DECREF(key_utf8);
        if (!attr.isA(Tags::effective_end) && !attr.isA(Tags::effective_start)
          && !attr.isA(Tags::operation) && !attr.isA(Tags::buffer)
          && !attr.isA(Tags::quantity) && !attr.isA(Tags::type)
          && !attr.isA(Tags::action))
        {
          const MetaFieldBase* fmeta = l->getType().findField(attr.getHash());
          if (!fmeta && l->getType().category)
            fmeta = l->getType().category->findField(attr.getHash());
          if (fmeta)
            // Update the attribute
            fmeta->setField(l, field);
          else
            l->setProperty(attr.getName(), value);
        }
      }
    }

    // Return the object
    Py_INCREF(l);
    return static_cast<PyObject*>(l);
  }
  catch (...)
  {
    PythonType::evalException();
    return nullptr;
  }
}
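
Both Flow::create variants use the same pattern for optional keyword arguments: PyDict_GetItemString returns a borrowed reference or NULL, so each lookup is checked and a default is used when the key is absent. A standalone sketch of that pattern, using the raw CPython API instead of frePPLe's PythonData wrapper, might look as follows; the helper name and the dictionary contents are made up for the example.

#include <Python.h>
#include <iostream>

// Read an optional numeric entry from a keyword dictionary, falling back to a
// default when the key is absent or not convertible. This mirrors the
// "quantity" handling above, but without the PythonData wrapper.
static double getOptionalDouble(PyObject* kwds, const char* key, double dflt)
{
  if (!kwds) return dflt;
  // PyDict_GetItemString returns a *borrowed* reference, or NULL if absent.
  PyObject* value = PyDict_GetItemString(kwds, key);
  if (!value) return dflt;
  double result = PyFloat_AsDouble(value);
  if (result == -1.0 && PyErr_Occurred())
  {
    PyErr_Clear();   // not convertible to a float: use the default
    return dflt;
  }
  return result;
}

int main()
{
  Py_Initialize();
  PyObject* kwds = PyDict_New();
  PyObject* qty = PyFloat_FromDouble(2.5);
  PyDict_SetItemString(kwds, "quantity", qty);  // does not steal the reference
  Py_DECREF(qty);

  std::cout << "quantity = " << getOptionalDouble(kwds, "quantity", 1.0) << std::endl;
  std::cout << "priority = " << getOptionalDouble(kwds, "priority", 1.0) << std::endl;

  Py_DECREF(kwds);
  Py_Finalize();
  return 0;
}

Because the reference returned by PyDict_GetItemString is borrowed, no Py_DECREF is needed on the looked-up value, which keeps the lookup-with-default pattern short.
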
Exemplo n.º 17
0
PyObject* Flow::create(PyTypeObject* pytype, PyObject* args, PyObject* kwds)
{
  try
  {
    // Pick up the operation
    PyObject* oper = PyDict_GetItemString(kwds, "operation");
    if (!oper)
      throw DataException("missing operation on Flow");
    if (!PyObject_TypeCheck(oper, Operation::metadata->pythonClass))
      throw DataException("flow operation must be of type operation");

    // Pick up the buffer
    PyObject* buf = PyDict_GetItemString(kwds, "buffer");
    if (!buf)
      throw DataException("missing buffer on Flow");
    if (!PyObject_TypeCheck(buf, Buffer::metadata->pythonClass))
      throw DataException("flow buffer must be of type buffer");

    // Pick up the quantity
    PyObject* q1 = PyDict_GetItemString(kwds, "quantity");
    double q2 = q1 ? PythonData(q1).getDouble() : 1.0;

    // Pick up the effectivity dates
    DateRange eff;
    PyObject* eff_start = PyDict_GetItemString(kwds, "effective_start");
    if (eff_start)
    {
      PythonData d(eff_start);
      eff.setStart(d.getDate());
    }
    PyObject* eff_end = PyDict_GetItemString(kwds, "effective_end");
    if (eff_end)
    {
      PythonData d(eff_end);
      eff.setEnd(d.getDate());
    }

    // Pick up the type and create the flow
    Flow *l;
    PyObject* t = PyDict_GetItemString(kwds, "type");
    if (t)
    {
      PythonData d(t);
      if (d.getString() == "flow_end")
        l = new FlowEnd(
          static_cast<Operation*>(oper),
          static_cast<Buffer*>(buf),
          q2
        );
      else if (d.getString() == "flow_fixed_end")
        l = new FlowFixedEnd(
          static_cast<Operation*>(oper),
          static_cast<Buffer*>(buf),
          q2
        );
      else if (d.getString() == "flow_fixed_start")
        l = new FlowFixedStart(
          static_cast<Operation*>(oper),
          static_cast<Buffer*>(buf),
          q2
        );
      else
        l = new FlowStart(
          static_cast<Operation*>(oper),
          static_cast<Buffer*>(buf),
          q2
        );
    }
    else
      l = new FlowStart(
        static_cast<Operation*>(oper),
        static_cast<Buffer*>(buf),
        q2
      );

    // Iterate over extra keywords, and set attributes.   @todo move this responsibility to the readers...
    if (l)
    {
      l->setEffective(eff);
      PyObject *key, *value;
      Py_ssize_t pos = 0;
      while (PyDict_Next(kwds, &pos, &key, &value))
      {
        PythonData field(value);
        PyObject* key_utf8 = PyUnicode_AsUTF8String(key);
        DataKeyword attr(PyBytes_AsString(key_utf8));
        Py_DECREF(key_utf8);
        if (!attr.isA(Tags::effective_end) && !attr.isA(Tags::effective_start)
          && !attr.isA(Tags::operation) && !attr.isA(Tags::buffer)
          && !attr.isA(Tags::quantity) && !attr.isA(Tags::type)
          && !attr.isA(Tags::action))
        {
          const MetaFieldBase* fmeta = l->getType().findField(attr.getHash());
          if (!fmeta && l->getType().category)
            fmeta = l->getType().category->findField(attr.getHash());
          if (fmeta)
            // Update the attribute
            fmeta->setField(l, field);
          else
            PyErr_Format(PyExc_AttributeError,
                "attribute '%S' on '%s' can't be updated",
                key, Py_TYPE(l)->tp_name);
        }
      }
    }

    // Return the object
    Py_INCREF(l);
    return static_cast<PyObject*>(l);
  }
  catch (...)
  {
    PythonType::evalException();
    return NULL;
  }
}
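
The tail of both constructors walks the full keyword dictionary with PyDict_Next and dispatches anything that was not already consumed explicitly. The sketch below isolates that iteration pattern; the known-key set and the handler body are placeholders for this example, not frePPLe code.

#include <Python.h>
#include <iostream>
#include <set>
#include <string>

// Walk every entry of a keyword dictionary and hand the "extra" ones (keys
// not consumed explicitly) to a generic handler, the way the constructors
// above treat unrecognised attributes.
static void applyExtraKeywords(PyObject* kwds)
{
  static const std::set<std::string> known = {
    "operation", "buffer", "quantity", "type",
    "effective_start", "effective_end", "action"
  };

  PyObject *key, *value;   // PyDict_Next yields borrowed references
  Py_ssize_t pos = 0;
  while (PyDict_Next(kwds, &pos, &key, &value))
  {
    // Convert the key to a C string; keys are expected to be unicode.
    PyObject* key_utf8 = PyUnicode_AsUTF8String(key);
    if (!key_utf8) { PyErr_Clear(); continue; }
    std::string name = PyBytes_AsString(key_utf8);
    Py_DECREF(key_utf8);

    if (known.count(name)) continue;   // handled explicitly elsewhere

    // Placeholder for the real dispatch (setField / setProperty above).
    PyObject* repr = PyObject_Str(value);
    const char* text = repr ? PyUnicode_AsUTF8(repr) : nullptr;
    std::cout << "extra attribute " << name << " = " << (text ? text : "?") << std::endl;
    Py_XDECREF(repr);
  }
}

int main()
{
  Py_Initialize();
  PyObject* kwds = Py_BuildValue("{s:d, s:s}", "quantity", 2.0, "category", "make");
  applyExtraKeywords(kwds);   // prints only the 'category' entry
  Py_DECREF(kwds);
  Py_Finalize();
  return 0;
}
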