Example No. 1
0
SessionImpl::SessionImpl(std::shared_ptr<ContextImpl>  context,
                         gmx::MdrunnerBuilder        &&runnerBuilder,
                         const gmx::SimulationContext &simulationContext,
                         gmx::LogFilePtr               fplog,
                         gmx_multisim_t              * multiSim) :
    context_(std::move(context)),
    mpiContextManager_(std::make_unique<MpiContextManager>()),
    simulationContext_(simulationContext),
    logFilePtr_(std::move(fplog)),
    multiSim_(multiSim)
{
    GMX_ASSERT(context_, "SessionImpl invariant implies valid ContextImpl handle.");
    GMX_ASSERT(mpiContextManager_, "SessionImpl invariant implies valid MpiContextManager guard.");
    GMX_ASSERT(simulationContext_.communicationRecord_, "SessionImpl invariant implies valid commrec.");
    GMX_UNUSED_VALUE(multiSim_);
    GMX_UNUSED_VALUE(simulationContext_);

    // \todo Session objects can have logic specialized for the runtime environment.

    auto stopHandlerBuilder = std::make_unique<gmx::StopHandlerBuilder>();
    signalManager_ = std::make_unique<SignalManager>(stopHandlerBuilder.get());
    GMX_ASSERT(signalManager_, "SessionImpl invariant includes a valid SignalManager.");

    runnerBuilder.addStopHandlerBuilder(std::move(stopHandlerBuilder));
    runner_ = std::make_unique<gmx::Mdrunner>(runnerBuilder.build());
    GMX_ASSERT(runner_, "SessionImpl invariant implies valid Mdrunner handle.");

    // For the libgromacs context, a session should explicitly reset global variables that could
    // have been set in a previous simulation during the same process.
    gmx_reset_stop_condition();
}
Example No. 2
0
/*!
 * \param[in,out] dest   Destination positions.
 * \param[in]     src    Source positions.
 * \param[in]     bFirst If true, memory is allocated for \p dest and a full
 *   copy is made; otherwise, only variable parts are copied, and no memory
 *   is allocated.
 *
 * \p dest should have been initialized somehow (calloc() is enough).
 */
void
gmx_ana_pos_copy(gmx_ana_pos_t *dest, gmx_ana_pos_t *src, bool bFirst)
{
    if (bFirst)
    {
        gmx_ana_pos_reserve(dest, src->count(), 0);
        if (src->v)
        {
            gmx_ana_pos_reserve_velocities(dest);
        }
        if (src->f)
        {
            gmx_ana_pos_reserve_forces(dest);
        }
    }
    memcpy(dest->x, src->x, src->count()*sizeof(*dest->x));
    if (dest->v)
    {
        GMX_ASSERT(src->v, "src velocities should be non-null if dest velocities are allocated");
        memcpy(dest->v, src->v, src->count()*sizeof(*dest->v));
    }
    if (dest->f)
    {
        GMX_ASSERT(src->f, "src forces should be non-null if dest forces are allocated");
        memcpy(dest->f, src->f, src->count()*sizeof(*dest->f));
    }
    gmx_ana_indexmap_copy(&dest->m, &src->m, bFirst);
}
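The bFirst flag above separates one-time allocation from per-frame copying. A hypothetical usage sketch, not from the source: dest is assumed to be zero-initialized beforehand (as the comment above permits), and frameCount/getFramePositions() are placeholder names.
// Hypothetical per-frame loop using the bFirst protocol of gmx_ana_pos_copy().
for (int frame = 0; frame < frameCount; ++frame)
{
    gmx_ana_pos_t *src = getFramePositions(frame);       // placeholder data source
    gmx_ana_pos_copy(dest, src, /*bFirst=*/frame == 0);  // allocate once, then copy only the variable parts
}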
Example No. 3
0
/*! \brief Clears nonbonded shift force output array and energy outputs on the GPU.
 */
static void
nbnxn_ocl_clear_e_fshift(gmx_nbnxn_ocl_t *nb)
{

    cl_int               cl_error;
    cl_atomdata_t *      adat     = nb->atdat;
    cl_command_queue     ls       = nb->stream[eintLocal];

    size_t               local_work_size[3]   = {1, 1, 1};
    size_t               global_work_size[3]  = {1, 1, 1};

    cl_int               shifts   = SHIFTS*3;

    cl_int               arg_no;

    cl_kernel            zero_e_fshift = nb->kernel_zero_e_fshift;

    local_work_size[0]   = 64;
    // Round the total number of threads up from the array size
    global_work_size[0]  = ((shifts + local_work_size[0] - 1)/local_work_size[0])*local_work_size[0];
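    // For example, if SHIFTS is 45 (its usual GROMACS value), shifts = 135 and global_work_size[0] rounds up to 192.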

    arg_no    = 0;
    cl_error  = clSetKernelArg(zero_e_fshift, arg_no++, sizeof(cl_mem), &(adat->fshift));
    cl_error |= clSetKernelArg(zero_e_fshift, arg_no++, sizeof(cl_mem), &(adat->e_lj));
    cl_error |= clSetKernelArg(zero_e_fshift, arg_no++, sizeof(cl_mem), &(adat->e_el));
    cl_error |= clSetKernelArg(zero_e_fshift, arg_no++, sizeof(cl_uint), &shifts);
    GMX_ASSERT(cl_error == CL_SUCCESS, ocl_get_error_string(cl_error).c_str());

    cl_error = clEnqueueNDRangeKernel(ls, zero_e_fshift, 3, nullptr, global_work_size, local_work_size, 0, nullptr, nullptr);
    GMX_ASSERT(cl_error == CL_SUCCESS, ocl_get_error_string(cl_error).c_str());
}
Example No. 4
0
AnalysisDataPointSetRef::AnalysisDataPointSetRef(
        const AnalysisDataPointSetRef &points, int firstColumn, int columnCount)
    : header_(points.header()), dataSetIndex_(points.dataSetIndex()),
      firstColumn_(0)
{
    GMX_ASSERT(firstColumn >= 0, "Invalid first column");
    GMX_ASSERT(columnCount >= 0, "Invalid column count");
    if (points.lastColumn() < firstColumn
        || points.firstColumn() >= firstColumn + columnCount
        || columnCount == 0)
    {
        return;
    }
    AnalysisDataValuesRef::const_iterator begin = points.values().begin();
    int pointsOffset = firstColumn - points.firstColumn();
    if (pointsOffset > 0)
    {
        // Offset pointer if the first column is not the first in points.
        begin += pointsOffset;
    }
    else
    {
        // Take into account if first column is before the first in points.
        firstColumn_ = -pointsOffset;
        columnCount -= -pointsOffset;
    }
    // Decrease column count if there are not enough columns in points.
    AnalysisDataValuesRef::const_iterator end = begin + columnCount;
    if (pointsOffset + columnCount > points.columnCount())
    {
        end = points.values().end();
    }
    values_ = AnalysisDataValuesRef(begin, end);
}
Example No. 5
void
AnalysisDataModuleManager::notifyPointsAdd(const AnalysisDataPointSetRef &points) const
{
    GMX_ASSERT(impl_->state_ == Impl::eInFrame, "notifyFrameStart() not called");
    // TODO: Add checks for column spans (requires passing the information
    // about the column counts from somewhere).
    //GMX_ASSERT(points.lastColumn() < columnCount(points.dataSetIndex()),
    //           "Invalid columns");
    GMX_ASSERT(points.frameIndex() == impl_->currIndex_,
               "Points do not correspond to current frame");
    if (impl_->bSerialModules_)
    {
        if (!impl_->bAllowMissing_ && !points.allPresent())
        {
            GMX_THROW(APIError("Missing data not supported by a module"));
        }

        Impl::ModuleList::const_iterator i;
        for (i = impl_->modules_.begin(); i != impl_->modules_.end(); ++i)
        {
            if (!i->bParallel)
            {
                i->module->pointsAdded(points);
            }
        }
    }
}
Example No. 6
0
static gmx_pme_t *gmx_pmeonly_switch(std::vector<gmx_pme_t *> *pmedata,
                                     const ivec grid_size,
                                     real ewaldcoeff_q, real ewaldcoeff_lj,
                                     const t_commrec *cr, const t_inputrec *ir)
{
    GMX_ASSERT(pmedata, "Bad PME tuning list pointer");
    for (auto &pme : *pmedata)
    {
        GMX_ASSERT(pme, "Bad PME tuning list element pointer");
        if (pme->nkx == grid_size[XX] &&
            pme->nky == grid_size[YY] &&
            pme->nkz == grid_size[ZZ])
        {
            /* Here we have found an existing PME data structure that suits us.
             * However, in the GPU case, we have to reinitialize it - there's only one GPU structure.
             * This should not cause actual GPU reallocations (the allocated buffers are never shrunk);
             * it only updates the grid sizes in the GPU kernel parameters.
             * TODO: this should be something like gmx_pme_update_split_params()
             */
            gmx_pme_reinit(&pme, cr, pme, ir, grid_size, ewaldcoeff_q, ewaldcoeff_lj);
            return pme;
        }
    }

    const auto &pme          = pmedata->back();
    gmx_pme_t  *newStructure = nullptr;
    // Copy last structure with new grid params
    gmx_pme_reinit(&newStructure, cr, pme, ir, grid_size, ewaldcoeff_q, ewaldcoeff_lj);
    pmedata->push_back(newStructure);
    return newStructure;
}
Example No. 7
0
void ddCloseBalanceRegionGpu(const gmx_domdec_t          *dd,
                             float                        waitGpuCyclesInCpuRegion,
                             DdBalanceRegionWaitedForGpu  waitedForGpu)
{
    BalanceRegion *reg = getBalanceRegion(dd);
    if (reg->isOpen)
    {
        GMX_ASSERT(reg->isOpenOnGpu, "Can not close a non-open GPU balance region");
        GMX_ASSERT(!reg->isOpenOnCpu, "The GPU region should be closed after closing the CPU region");

        float waitGpuCyclesEstimate = gmx_cycles_read() - reg->cyclesLastCpu;
        if (waitedForGpu == DdBalanceRegionWaitedForGpu::no)
        {
            /* The actual time could be anywhere between 0 and
             * waitGpuCyclesEstimate. Using half is the best we can do.
             */
            const float unknownWaitEstimateFactor = 0.5f;
            waitGpuCyclesEstimate *= unknownWaitEstimateFactor;
        }

        float cyclesCpu = reg->cyclesLastCpu - reg->cyclesOpenCpu;
        dd_cycles_add(dd, cyclesCpu + waitGpuCyclesEstimate, ddCyclF);

        /* Register the total GPU wait time, to redistribute with GPU sharing */
        dd_cycles_add(dd, waitGpuCyclesInCpuRegion + waitGpuCyclesEstimate, ddCyclWaitGPU);

        /* Close the region */
        reg->isOpenOnGpu = false;
        reg->isOpen      = false;
    }
}
Example No. 8
0
void
AbstractAnalysisData::notifyPointsAdd(int firstcol, int n,
                                      const real *y, const real *dy,
                                      const bool *present) const
{
    GMX_ASSERT(_impl->_bInData, "notifyDataStart() not called");
    GMX_ASSERT(_impl->_bInFrame, "notifyFrameStart() not called");
    GMX_ASSERT(firstcol >= 0 && n > 0 && firstcol + n <= _ncol, "Invalid column");
    if (present && !_impl->_bAllowMissing)
    {
        for (int i = 0; i < n; ++i)
        {
            if (!present[i])
            {
                GMX_THROW(APIError("Missing data not supported by a module"));
            }
        }
    }

    Impl::ModuleList::const_iterator i;
    for (i = _impl->_modules.begin(); i != _impl->_modules.end(); ++i)
    {
        (*i)->pointsAdded(_impl->_currx, _impl->_currdx, firstcol, n,
                          y, dy, present);
    }
}
Example No. 9
0
/*! \brief Returns the pointer to the balance region.
 *
 * This should be replaced by a properly managed BalanceRegion class,
 * but that requires a lot of refactoring in domdec.cpp.
 */
static BalanceRegion *getBalanceRegion(const gmx_domdec_t *dd)
{
    GMX_ASSERT(dd != nullptr && dd->comm != nullptr, "Balance regions should only be used with DD");
    BalanceRegion *region = dd->comm->balanceRegion;
    GMX_ASSERT(region != nullptr, "Balance region should be initialized before use");
    return region;
}
Example No. 10
0
ColumnData(const char *title, int width, bool bWrap)
    : title_(title != NULL ? title : ""),
      width_(width), bWrap_(bWrap), firstLine_(0)
{
    GMX_ASSERT(width >= 0, "Negative width not possible");
    GMX_ASSERT(title_.length() <= static_cast<size_t>(width),
               "Title too long for column width");
}
Example No. 11
0
AnalysisDataPointSetRef::AnalysisDataPointSetRef(
        const AnalysisDataFrameHeader &header, int firstColumn,
        const AnalysisDataValuesRef &values)
    : header_(header), firstColumn_(firstColumn), values_(values)
{
    GMX_ASSERT(header_.isValid(),
               "Invalid point set reference should not be constructed");
    GMX_ASSERT(firstColumn >= 0, "Invalid first column");
}
Example No. 12
0
AnalysisDataFrameRef::AnalysisDataFrameRef(
        const AnalysisDataFrameRef &frame, int firstColumn, int columnCount)
    : header_(frame.header()), values_(&frame.values_[firstColumn], columnCount)
{
    GMX_ASSERT(firstColumn >= 0, "Invalid first column");
    GMX_ASSERT(columnCount >= 0, "Invalid column count");
    GMX_ASSERT(firstColumn + columnCount <= frame.columnCount(),
               "Invalid last column");
}
Example No. 13
0
void
AbstractAnalysisData::notifyFrameFinish() const
{
    GMX_ASSERT(_impl->_bInData, "notifyDataStart() not called");
    GMX_ASSERT(_impl->_bInFrame, "notifyFrameStart() not called");
    _impl->_bInFrame = false;

    Impl::ModuleList::const_iterator i;

    for (i = _impl->_modules.begin(); i != _impl->_modules.end(); ++i)
    {
        (*i)->frameFinished();
    }
}
Example No. 14
0
void HistogramSize::setHistogramSize(double histogramSize,
                                     double weightHistogramScalingFactor)
{
    GMX_ASSERT(histogramSize > 0, "The histogram should not be empty");
    GMX_ASSERT(weightHistogramScalingFactor > 0, "The histogram scaling factor should be positive");

    histogramSize_ = histogramSize;

    /* The weight of new samples relative to previous ones changes
     * when the histogram is rescaled. We keep the log since this number
     * can become very large.
     */
    logScaledSampleWeight_ -= std::log(weightHistogramScalingFactor);
}
Example No. 15
0
void pme_gpu_launch_spread(gmx_pme_t            *pme,
                           const rvec           *x,
                           gmx_wallcycle        *wcycle)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    PmeGpu *pmeGpu = pme->gpu;

    // The only spot of PME GPU where LAUNCH_GPU counter increases call-count
    wallcycle_start(wcycle, ewcLAUNCH_GPU);
    // The only spot of PME GPU where ewcsLAUNCH_GPU_PME subcounter increases call-count
    wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_PME);
    pme_gpu_copy_input_coordinates(pmeGpu, x);
    wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
    wallcycle_stop(wcycle, ewcLAUNCH_GPU);

    const unsigned int gridIndex  = 0;
    real              *fftgrid    = pme->fftgrid[gridIndex];
    if (pmeGpu->settings.currentFlags & GMX_PME_SPREAD)
    {
        /* Spread the coefficients on a grid */
        const bool computeSplines = true;
        const bool spreadCharges  = true;
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
        pme_gpu_spread(pmeGpu, gridIndex, fftgrid, computeSplines, spreadCharges);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }
}
Example No. 16
0
void TopologyManager::loadTopology(const char *filename)
{
    bool    fullTopology;
    int     ePBC;
    rvec   *xtop = nullptr;
    matrix  box;

    GMX_RELEASE_ASSERT(mtop_ == nullptr, "Topology initialized more than once");
    mtop_ = gmx::compat::make_unique<gmx_mtop_t>();
    readConfAndTopology(
            gmx::test::TestFileManager::getInputFilePath(filename).c_str(),
            &fullTopology, mtop_.get(), &ePBC, frame_ != nullptr ? &xtop : nullptr,
            nullptr, box);

    if (frame_ != nullptr)
    {
        GMX_ASSERT(xtop != nullptr, "Keep the static analyzer happy");
        frame_->natoms = mtop_->natoms;
        frame_->bX     = TRUE;
        snew(frame_->x, frame_->natoms);
        std::memcpy(frame_->x, xtop, sizeof(*frame_->x) * frame_->natoms);
        frame_->bBox   = TRUE;
        copy_mat(box, frame_->box);
    }

    sfree(xtop);
}
Example No. 17
0
/*! \brief
 * Helper for splitting a sequence of atom indices into groups.
 *
 * \param[in]     atomIndex  Index of the next atom in the sequence.
 * \param[in]     top        Topology structure.
 * \param[in]     type       Type of group to split into.
 * \param[in,out] id         Variable to receive the next group id.
 * \returns  `true` if \p atomIndex starts a new group in the sequence, i.e.,
 *     if \p *id was changed.
 *
 * \p *id should be initialized to `-1` before the first call to this function, and
 * then each atom index in the sequence should be passed to it in turn.
 *
 * \ingroup module_selection
 */
static bool
next_group_index(int atomIndex, t_topology *top, e_index_t type, int *id)
{
    int prev = *id;
    switch (type)
    {
        case INDEX_ATOM:
            *id = atomIndex;
            break;
        case INDEX_RES:
            *id = top->atoms.atom[atomIndex].resind;
            break;
        case INDEX_MOL:
            if (*id >= 0 && top->mols.index[*id] > atomIndex)
            {
                *id = 0;
            }
            while (*id < top->mols.nr && atomIndex >= top->mols.index[*id+1])
            {
                ++*id;
            }
            GMX_ASSERT(*id < top->mols.nr, "Molecules do not span all the atoms");
            break;
        case INDEX_UNKNOWN:
        case INDEX_ALL:
            *id = 0;
            break;
    }
    return prev != *id;
}
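The protocol documented above (initialize *id to -1, then feed each atom index in turn, splitting whenever the function returns true) could be exercised as in this hypothetical sketch, where atomIndices and atomCount are placeholder names:
// Hypothetical caller of next_group_index(); splits a sorted atom sequence into residue groups.
int id = -1;
for (int i = 0; i < atomCount; ++i)
{
    if (next_group_index(atomIndices[i], top, INDEX_RES, &id))
    {
        // atomIndices[i] starts a new group; id now holds the new group id.
    }
}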
Example No. 18
0
/*! \brief
 * A convenience wrapper for launching either the GPU or CPU FFT.
 *
 * \param[in] pme            The PME structure.
 * \param[in] gridIndex      The grid index - should currently always be 0.
 * \param[in] dir            The FFT direction enum.
 * \param[in] wcycle         The wallclock counter.
 */
void inline parallel_3dfft_execute_gpu_wrapper(gmx_pme_t              *pme,
                                               const int               gridIndex,
                                               enum gmx_fft_direction  dir,
                                               gmx_wallcycle_t         wcycle)
{
    GMX_ASSERT(gridIndex == 0, "Only single grid supported");
    if (pme_gpu_performs_FFT(pme->gpu))
    {
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
        pme_gpu_3dfft(pme->gpu, dir, gridIndex);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }
    else
    {
        wallcycle_start(wcycle, ewcPME_FFT_MIXED_MODE);
#pragma omp parallel for num_threads(pme->nthread) schedule(static)
        for (int thread = 0; thread < pme->nthread; thread++)
        {
            gmx_parallel_3dfft_execute(pme->pfft_setup[gridIndex], dir, thread, wcycle);
        }
        wallcycle_stop(wcycle, ewcPME_FFT_MIXED_MODE);
    }
}
Example No. 19
0
//! This function is documented in the header file
bool canDetectGpus(std::string *errorMessage)
{
    cl_uint numPlatforms;
    cl_int  status       = clGetPlatformIDs(0, nullptr, &numPlatforms);
    GMX_ASSERT(status != CL_INVALID_VALUE, "Incorrect call of clGetPlatformIDs detected");
#ifdef cl_khr_icd
    if (status == CL_PLATFORM_NOT_FOUND_KHR)
    {
        // No valid ICDs found
        if (errorMessage != nullptr)
        {
            errorMessage->assign("No valid OpenCL driver found");
        }
        return false;
    }
#endif
    GMX_RELEASE_ASSERT(status == CL_SUCCESS,
                       gmx::formatString("An unexpected value was returned from clGetPlatformIDs %d: %s",
                                         status, ocl_get_error_string(status).c_str()).c_str());
    bool foundPlatform = (numPlatforms > 0);
    if (!foundPlatform && errorMessage != nullptr)
    {
        errorMessage->assign("No OpenCL platforms found even though the driver was valid");
    }
    return foundPlatform;
}
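A hypothetical caller sketch for the contract above (errorMessage is optional and is only assigned when detection is not possible):
// Hypothetical use of canDetectGpus(); probes the OpenCL runtime without enumerating devices.
std::string reason;
if (!canDetectGpus(&reason))
{
    fprintf(stderr, "Skipping GPU detection: %s\n", reason.c_str());
}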
Example No. 20
0
Status SessionImpl::addRestraint(std::shared_ptr<gmxapi::MDModule> module)
{
    GMX_ASSERT(runner_, "SessionImpl invariant implies valid Mdrunner handle.");
    Status status {
        false
    };

    if (module != nullptr)
    {
        const auto &name = module->name();
        if (restraints_.find(name) == restraints_.end())
        {
            auto restraint = module->getRestraint();
            if (restraint != nullptr)
            {
                restraints_.emplace(std::make_pair(name, restraint));
                auto sessionResources = createResources(module);
                if (!sessionResources)
                {
                    status = false;
                }
                else
                {
                    runner_->addPotential(restraint, module->name());
                    status = true;
                }
            }
        }
    }
    return status;
}
Example No. 21
bool
AnalysisDataModuleManager::hasSerialModules() const
{
    GMX_ASSERT(impl_->state_ != Impl::eNotStarted,
               "Module state not accessible before data is started");
    return impl_->bSerialModules_;
}
Example No. 22
0
void
AbstractAnalysisData::notifyFrameStart(real x, real dx) const
{
    GMX_ASSERT(_impl->_bInData, "notifyDataStart() not called");
    GMX_ASSERT(!_impl->_bInFrame,
               "notifyFrameStart() called while inside a frame");
    _impl->_bInFrame = true;
    _impl->_currx  = x;
    _impl->_currdx = dx;

    Impl::ModuleList::const_iterator i;
    for (i = _impl->_modules.begin(); i != _impl->_modules.end(); ++i)
    {
        (*i)->frameStarted(x, dx);
    }
}
Example No. 23
0
AnalysisDataFrameRef::AnalysisDataFrameRef(
        const AnalysisDataFrameRef &frame, int firstColumn, int columnCount)
    : header_(frame.header()),
      values_(constArrayRefFromArray(&frame.values_[firstColumn], columnCount)),
      pointSets_(frame.pointSets_)
{
    // FIXME: This doesn't produce a valid internal state, although it does
    // work in some cases. The point sets cannot be correctly managed here, but
    // need to be handled by the data proxy class.
    GMX_ASSERT(firstColumn >= 0, "Invalid first column");
    GMX_ASSERT(columnCount >= 0, "Invalid column count");
    GMX_ASSERT(pointSets_.size() == 1U,
               "Subsets of frames only supported with simple data");
    GMX_ASSERT(firstColumn + columnCount <= ssize(values_),
               "Invalid last column");
}
Example No. 24
0
void BiasWriter::transferMetaDataToWriter(gmx::index         metaDataIndex,
                                          AwhOutputMetaData  metaDataType,
                                          const Bias        &bias)
{
    gmx::ArrayRef<float> data = block_[getVarStartBlock(AwhOutputEntryType::MetaData)].data();
    GMX_ASSERT(metaDataIndex < data.ssize(), "Attempt to transfer AWH meta data to block for index out of range");

    /* Transfer the point data of this variable to the right block(s) */
    switch (metaDataType)
    {
        case AwhOutputMetaData::NumBlock:
            /* The number of subblocks per awh (needed by gmx_energy) */
            data[metaDataIndex] = static_cast<double>(block_.size());
            /* Note: a single subblock takes only a single type and we need doubles. */
            break;
        case AwhOutputMetaData::TargetError:
            /* The theoretical target error */
            data[metaDataIndex] = bias.params().initialErrorInKT*std::sqrt(bias.params().initialHistogramSize/bias.state().histogramSize().histogramSize());
            break;
        case AwhOutputMetaData::ScaledSampleWeight:
            /* The logarithm of the sample weight relative to a sample weight of 1 at the initial time.
               In the normal case, this will increase in the initial stage and then stay at a constant value. */
            data[metaDataIndex] = bias.state().histogramSize().logScaledSampleWeight();
            break;
        case AwhOutputMetaData::Count:
            break;
    }
}
Example No. 25
0
void pr_alloc (int extra, t_params *pr)
{
    int i, j;

    /* get new space for arrays */
    if (extra < 0)
    {
        gmx_fatal(FARGS, "Trying to make array smaller.\n");
    }
    if (extra == 0)
    {
        return;
    }
    GMX_ASSERT(pr->nr != 0 || pr->param == NULL, "Invalid t_params object");
    if (pr->nr+extra > pr->maxnr)
    {
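        /* Grow capacity by 20%, but at least by the requested amount; e.g. maxnr = 100, extra = 5 gives a new maxnr of max(120, 105) = 120. */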
        pr->maxnr = std::max(static_cast<int>(1.2*pr->maxnr), pr->maxnr + extra);
        srenew(pr->param, pr->maxnr);
        for (i = pr->nr; (i < pr->maxnr); i++)
        {
            for (j = 0; (j < MAXATOMLIST); j++)
            {
                pr->param[i].a[j] = 0;
            }
            for (j = 0; (j < MAXFORCEPARAM); j++)
            {
                pr->param[i].c[j] = 0;
            }
            set_p_string(&(pr->param[i]), "");
        }
    }
}
Example No. 26
0
Status Session::run() noexcept
{
    GMX_ASSERT(impl_, "Session invariant implies valid implementation object handle.");

    const Status status = impl_->run();
    return status;
}
Example No. 27
0
void
AbstractAnalysisData::notifyFrameStart(const AnalysisDataFrameHeader &header) const
{
    GMX_ASSERT(impl_->bInData_, "notifyDataStart() not called");
    GMX_ASSERT(!impl_->bInFrame_,
               "notifyFrameStart() called while inside a frame");
    GMX_ASSERT(header.index() == impl_->nframes_,
               "Out of order frames");
    impl_->bInFrame_ = true;
    impl_->currIndex_ = header.index();

    Impl::ModuleList::const_iterator i;
    for (i = impl_->modules_.begin(); i != impl_->modules_.end(); ++i)
    {
        (*i)->frameStarted(header);
    }
}
Example No. 28
0
AnalysisDataPointSetRef::AnalysisDataPointSetRef(
        const AnalysisDataFrameHeader        &header,
        const std::vector<AnalysisDataValue> &values)
    : header_(header), firstColumn_(0), values_(values.begin(), values.end())
{
    GMX_ASSERT(header_.isValid(),
               "Invalid point set reference should not be constructed");
}
Example No. 29
0
AnalysisDataFrameRef::AnalysisDataFrameRef(
        const AnalysisDataFrameHeader      &header,
        const AnalysisDataValuesRef        &values,
        const AnalysisDataPointSetInfosRef &pointSets)
    : header_(header), values_(values), pointSets_(pointSets)
{
    GMX_ASSERT(!pointSets_.empty(), "There must always be a point set");
}
Example No. 30
0
Session::Session(std::unique_ptr<SessionImpl> impl) noexcept
{
    if (impl != nullptr)
    {
        impl_ = std::move(impl);
    }
    GMX_ASSERT(impl_->isOpen(), "SessionImpl invariant implies valid Mdrunner handle.");
}