Example #1
0
// Map the UMat's buffer to host memory and wrap it in a Mat header.
// ACCESS_RW is forced for now (see TODO), so the mapping is always writable.
Mat UMat::getMat(int accessFlags) const
{
    if(!u)
        return Mat();
    // TODO Support ACCESS_READ (ACCESS_WRITE) without unnecessary data transfers
    accessFlags |= ACCESS_RW;
    UMatDataAutoLock autolock(u);
    // CV_XADD returns the previous value: the first mapper (refcount was 0)
    // performs the actual device->host map under the lock taken above.
    if(CV_XADD(&u->refcount, 1) == 0)
        u->currAllocator->map(u, accessFlags);
    if (u->data != 0)
    {
        // Build a Mat header aliasing the mapped host buffer; sharing u
        // keeps the mapping (and refcount) alive while the Mat exists.
        Mat hdr(dims, size.p, type(), u->data + offset, step.p);
        hdr.flags = flags;
        hdr.u = u;
        hdr.datastart = u->data;
        hdr.data = u->data + offset;
        hdr.datalimit = hdr.dataend = u->data + u->size;
        return hdr;
    }
    else
    {
        // Mapping failed: roll back the reference taken above, then report.
        CV_XADD(&u->refcount, -1);
        // data == 0 on this path, so this assert always fires with the message.
        CV_Assert(u->data != 0 && "Error mapping of UMat to host memory.");
        return Mat();
    }
}
Example #2
0
// Destructor: clears every field and, when this UMatData is a temporary
// wrapper created by Mat::getUMat() over another buffer (originalUMatData
// set), releases the two references getUMat() took on the base object —
// possibly unmapping and deallocating it.
UMatData::~UMatData()
{
    prevAllocator = currAllocator = 0;
    urefcount = refcount = 0;
    CV_Assert(mapcount == 0);
    data = origdata = 0;
    size = 0;
    flags = 0;
    handle = 0;
    userdata = 0;
    allocatorFlags_ = 0;
    if (originalUMatData)
    {
        UMatData* u = originalUMatData;
        // Drop the refcount/urefcount pair added by Mat::getUMat().
        CV_XADD(&(u->urefcount), -1);
        CV_XADD(&(u->refcount), -1);
        bool showWarn = false;
        if (u->refcount == 0)
        {
            // Mat-side references are gone but UMat-side ones remain:
            // suspicious object lifetime, warn in debug builds below.
            if (u->urefcount > 0)
                showWarn = true;
            // simulate Mat::deallocate
            if (u->mapcount != 0)
            {
                (u->currAllocator ? u->currAllocator : /* TODO allocator ? allocator :*/ Mat::getStdAllocator())->unmap(u);
            }
            else
            {
                // we don't do "map", so we can't do "unmap"
            }
        }
        if (u->refcount == 0 && u->urefcount == 0) // oops, we need to free resources
        {
            showWarn = true;
            // simulate UMat::deallocate
            u->currAllocator->deallocate(u);
        }
#ifndef NDEBUG
        if (showWarn)
        {
            // Debug-only diagnostic, printed at most 100 times per process.
            static int warn_message_showed = 0;
            if (warn_message_showed++ < 100)
            {
                fflush(stdout);
                fprintf(stderr, "\n! OPENCV warning: getUMat()/getMat() call chain possible problem."
                                "\n!                 Base object is dead, while nested/derived object is still alive or processed."
                                "\n!                 Please check lifetime of UMat/Mat objects!\n");
                fflush(stderr);
            }
        }
#else
        (void)showWarn;
#endif
        originalUMatData = NULL;
    }
}
Example #3
0
// ROI constructor: builds a 2-D submatrix header that shares m's buffer.
UMat::UMat(const UMat& m, const Rect& roi)
    : flags(m.flags), dims(2), rows(roi.height), cols(roi.width),
    allocator(m.allocator), u(m.u), offset(m.offset + roi.y*m.step[0]), size(&rows)
{
    CV_Assert( m.dims <= 2 );
    // A narrower ROI breaks row continuity; a single-row ROI is continuous.
    flags &= roi.width < m.cols ? ~CONTINUOUS_FLAG : -1;
    flags |= roi.height == 1 ? CONTINUOUS_FLAG : 0;

    size_t esz = CV_ELEM_SIZE(flags);
    offset += roi.x*esz;
    CV_Assert( 0 <= roi.x && 0 <= roi.width && roi.x + roi.width <= m.cols &&
              0 <= roi.y && 0 <= roi.height && roi.y + roi.height <= m.rows );
    // Share the underlying buffer with m.
    if( u )
        CV_XADD(&(u->urefcount), 1);
    if( roi.width < m.cols || roi.height < m.rows )
        flags |= SUBMATRIX_FLAG;

    step[0] = m.step[0]; step[1] = esz;

    // Degenerate ROI: drop the shared data and leave an empty header.
    if( rows <= 0 || cols <= 0 )
    {
        release();
        rows = cols = 0;
    }
}
Example #4
0
 // Copy constructor: shallow header copy that shares m's device buffer
 // and registers one more owner on its reference counter.
 inline oclMat::oclMat(const oclMat &m)
     : flags(m.flags), rows(m.rows), cols(m.cols), step(m.step), data(m.data),
       refcount(m.refcount), datastart(m.datastart), dataend(m.dataend),
       clCxt(m.clCxt), offset(m.offset), wholerows(m.wholerows),
       wholecols(m.wholecols)
 {
     int *rc = refcount;
     if (rc != 0)
         CV_XADD(rc, 1);
 }
Example #5
0
// Copy constructor: shallow copy sharing the pinned host buffer; adds one
// owner to the shared reference counter when the source owns data.
inline
HostMem::HostMem(const HostMem& m)
    : flags(m.flags), rows(m.rows), cols(m.cols), step(m.step), data(m.data),
      refcount(m.refcount), datastart(m.datastart), dataend(m.dataend),
      alloc_type(m.alloc_type)
{
    int* rc = refcount;
    if (rc != 0)
        CV_XADD(rc, 1);
}
Example #6
0
// Copy constructor: shallow copy sharing the device buffer; registers one
// more owner on the shared counter when the source owns data.
inline
GpuMat::GpuMat(const GpuMat& m)
    : flags(m.flags), rows(m.rows), cols(m.cols), step(m.step), data(m.data),
      refcount(m.refcount), datastart(m.datastart), dataend(m.dataend),
      allocator(m.allocator)
{
    int* rc = refcount;
    if (rc != 0)
        CV_XADD(rc, 1);
}
Example #7
0
// Assignment: drops our implementation, then shares other's and bumps its
// reference counter. Self-assignment is an explicit no-op.
cv::viz::Viz3d& cv::viz::Viz3d::operator=(const Viz3d& other)
{
    if (&other != this)
    {
        release();
        impl_ = other.impl_;
        if (impl_ != 0)
            CV_XADD(&impl_->ref_counter, 1);
    }
    return *this;
}
Example #8
0
 // Assignment: releases our texture reference, then shares tex's counter.
 // The source counter is bumped before adoption.
 ArrayTexture& operator = (const ArrayTexture& tex) {
     if (&tex != this) {
         release();

         int* rc = tex.refcount;
         if (rc)
             CV_XADD(rc, 1);

         this->refcount = rc;
     }

     return *this;
 }
// Drop one reference to the device buffer. CV_XADD returns the previous
// count, so the owner that takes it from 1 to 0 frees the counter and the
// CUDA allocation. The handle is always reset to the empty state.
void vm::scanner::cuda::DeviceMemory::release()
{
    if (refcount_ != 0 && CV_XADD(refcount_, -1) == 1)
    {
        delete refcount_;
        cudaSafeCall( cudaFree(data_) );
    }
    refcount_ = 0;
    sizeBytes_ = 0;
    data_ = 0;
}
Example #10
0
void cv::viz::Viz3d::release()
{
    // Drop one reference; CV_XADD returns the previous value, so the caller
    // that brings the counter to zero destroys the implementation.
    if (impl_ && CV_XADD(&impl_->ref_counter, -1) == 1)
    {
        delete impl_;
        impl_ = 0;
    }

    // If (after our decrement) only the global VizStorage registry still
    // references the window, ask it to purge unreferenced entries.
    if (impl_ && impl_->ref_counter == 1)
        VizStorage::removeUnreferenced();

    // This handle no longer refers to an implementation either way.
    impl_ = 0;
}
// Assignment: bump the source's counter *before* releasing our own storage
// so a buffer shared between both sides can never reach zero mid-assignment.
vm::scanner::cuda::DeviceMemory& vm::scanner::cuda::DeviceMemory::operator = (const vm::scanner::cuda::DeviceMemory& other_arg)
{
    if( &other_arg != this )
    {
        int* rc = other_arg.refcount_;
        if( rc )
            CV_XADD(rc, 1);
        release();

        refcount_  = rc;
        data_      = other_arg.data_;
        sizeBytes_ = other_arg.sizeBytes_;
    }
    return *this;
}
Example #12
0
 // ROI constructor: header over a rectangular sub-region of m's buffer.
 inline oclMat::oclMat(const oclMat &m, const Rect &roi)
     : flags(m.flags), rows(roi.height), cols(roi.width),
       step(m.step), data(m.data), refcount(m.refcount),
       datastart(m.datastart), dataend(m.dataend), clCxt(m.clCxt), offset(m.offset), wholerows(m.wholerows), wholecols(m.wholecols)
 {
     // A narrower ROI breaks row continuity.
     flags &= roi.width < m.cols ? ~Mat::CONTINUOUS_FLAG : -1;
     // Advance the byte offset to the ROI origin (rows then columns).
     offset += roi.y * step + roi.x * elemSize();
     CV_Assert( 0 <= roi.x && 0 <= roi.width && roi.x + roi.width <= m.wholecols &&
                0 <= roi.y && 0 <= roi.height && roi.y + roi.height <= m.wholerows );
     // Share the buffer with m.
     if( refcount )
         CV_XADD(refcount, 1);
     // Degenerate ROI collapses to an empty header (buffer still shared).
     if( rows <= 0 || cols <= 0 )
         rows = cols = 0;
 }
Example #13
0
// Assignment: add a reference on the source before releasing our storage so
// data shared by both sides survives release(); self-assignment is a no-op.
pcl::gpu::DeviceMemory& pcl::gpu::DeviceMemory::operator = (const pcl::gpu::DeviceMemory& other_arg)
{
    if( &other_arg != this )
    {
        int* rc = other_arg.refcount_;
        if( rc )
            CV_XADD(rc, 1);
        release();

        refcount_  = rc;
        data_      = other_arg.data_;
        sizeBytes_ = other_arg.sizeBytes_;
    }
    return *this;
}
Example #14
0
// Drop one reference to the pitched device buffer. The owner whose decrement
// takes the count from 1 to 0 (CV_XADD returns the previous value) frees the
// counter and the CUDA allocation. The handle is always reset afterwards.
void DeviceMemory2D::release()
{
    if( refcount_ != 0 && CV_XADD(refcount_, -1) == 1 )
    {
        delete refcount_;
        cudaSafeCall( cudaFree(data_) );
    }

    refcount_ = 0;
    step_ = 0;
    data_ = 0;
    rows_ = 0;
    colsBytes_ = 0;
}
Example #15
0
// Map the UMat to host memory and return a Mat header aliasing the mapping.
// NOTE(review): this variant maps unconditionally and without holding a
// UMatData lock (unlike the guarded getMat() that maps only on the first
// reference) — confirm callers do not race here.
Mat UMat::getMat(int accessFlags) const
{
    if(!u)
        return Mat();
    u->currAllocator->map(u, accessFlags | ACCESS_READ);
    CV_Assert(u->data != 0);
    Mat hdr(dims, size.p, type(), u->data + offset, step.p);
    hdr.flags = flags;
    hdr.u = u;
    hdr.datastart = u->data;
    hdr.data = hdr.datastart + offset;
    hdr.datalimit = hdr.dataend = u->data + u->size;
    // The returned Mat carries an extra reference to keep the mapping alive.
    CV_XADD(&hdr.u->refcount, 1);
    return hdr;
}
Example #16
0
// Convert a cv::Mat to a QImage. When it is safe (Qt >= 5, we hold the sole
// reference, data is 8-bit, and the channel count already matches the target
// format) the QImage aliases src's buffer; the cleanup-function overload of
// the QImage constructor releases the extra Mat reference when the QImage
// dies. Otherwise the pixels are converted into a freshly allocated QImage.
// NOTE(review): the original body had several compile errors — two pointers
// initialized with their own address (`&dstMat`, `&depthMat`), a QImage*
// assigned to a cv::Mat*, and a truncated `double alpha = (src.depth == CV_)`
// statement. They are fixed/removed below.
QImage imageFromMat(cv::Mat src, QImage::Format format = QImage::Format_Invalid) {
   // By default, preserve the format
   if (format == QImage::Format_Invalid) {
      if (src.channels() == 1)
         format = QImage::Format_Grayscale8;
      else if (src.channels() == 3)
         format = QImage::Format_RGB888;
      else if (src.channels() == 4)
         format = QImage::Format_ARGB32;
   }
   auto data = getConvData(src, format);
   if (!src.data || !src.u || format == QImage::Format_Invalid ||
       data.dst == QImage::Format_Invalid)
      return {};

   QImage dst;
   cv::Mat dstMat_;
   cv::Mat *dstMat = &dstMat_;  // FIX: was `&dstMat` (pointer to itself)

   bool keepBuffer = false;
#if QT_VERSION >= QT_VERSION_CHECK(5,0,0)
   // Zero-copy path: only valid while we are the sole owner of the buffer.
   keepBuffer = CV_XADD(&src.u->refcount, 0) == 1 // sole reference
         && (src.depth() == CV_8U || src.depth() == CV_8S)
         && src.channels() == data.dstChannels();
   if (keepBuffer) {
      // The lambda deletes the heap Mat (dropping its reference) when the
      // QImage releases the buffer.
      dst = QImage((uchar*)src.data, src.cols, src.rows, src.step, data.dstFormat,
                   [](void *m){ delete static_cast<cv::Mat*>(m); }, new cv::Mat(src));
      dstMat = &src;
   }
#endif
   if (!keepBuffer) {
      dst = QImage(src.cols, src.rows, data.dstFormat);
      dstMat_ = cv::Mat(src.rows, src.cols, data.dstCode, dst.bits(), dst.bytesPerLine());
   }

   // Reduce bit depth first if needed.
   // NOTE(review): CV_16U/CV_32F sources likely need a scale factor here
   // (the truncated `alpha` line suggests one was intended) — TODO confirm.
   if (src.depth() != CV_8U)
      src.convertTo(src, CV_8U);

   // TODO(review): when !keepBuffer, perform the channel conversion of src
   // into *dstMat (e.g. cv::cvtColor with the code from getConvData) —
   // the original never wrote the converted pixels either.

   return dst;
}
Example #17
0
// Smoke test: allocate a portal buffer, map it into this process, and show
// that CV_XADD atomically decrements the first mapped word.
int main(int argc, const char **argv)
{
  int totalsize = 4096;
  int fd = portalAlloc(totalsize, 0);
  if (fd < 0) {
    fprintf(stderr, "memory alloc failed\n");
    exit(-1);
  }
  fprintf(stderr, "allocated %d bytes, fd=%d\n", totalsize, fd);
  int *mem = (int*)portalMmap(fd, totalsize);
  // FIX: the mapping result was used unchecked; cover both NULL and the
  // mmap-style MAP_FAILED ((void*)-1) failure conventions.
  if (mem == 0 || mem == (int*)-1) {
    fprintf(stderr, "memory map failed\n");
    exit(-1);
  }
  *mem = 1;
  fprintf(stderr, "Before CV_XADD: mem=%p *mem=%d\n", mem, *mem);
  CV_XADD(mem, -1);
  // FIX: this log line previously said "Before CV_XADD" after the operation.
  fprintf(stderr, "After CV_XADD: *mem=%d\n", *mem);
  exit(0);
}
// Assignment: add a reference on the source before releasing our buffer so
// storage shared by both sides survives release(); self-assignment no-ops.
vm::scanner::cuda::DeviceMemory2D& vm::scanner::cuda::DeviceMemory2D::operator = (const vm::scanner::cuda::DeviceMemory2D& other_arg)
{
    if( &other_arg != this )
    {
        int* rc = other_arg.refcount_;
        if( rc )
            CV_XADD(rc, 1);
        release();

        data_      = other_arg.data_;
        step_      = other_arg.step_;
        colsBytes_ = other_arg.colsBytes_;
        rows_      = other_arg.rows_;
        refcount_  = rc;
    }
    return *this;
}
        // Row/column range constructor: header over a sub-range of m,
        // sharing its buffer and adjusting offset/flags accordingly.
        inline oclMat::oclMat(const oclMat &m, const Range &rRange, const Range &cRange)
        {
            flags = m.flags;
            step = m.step;
            refcount = m.refcount;
            data = m.data;
            datastart = m.datastart;
            dataend = m.dataend;
            clCxt = m.clCxt;
            wholerows = m.wholerows;
            wholecols = m.wholecols;
            offset = m.offset;
            // A row sub-range advances the byte offset by whole rows.
            if( rRange == Range::all() )
                rows = m.rows;
            else
            {
                CV_Assert( 0 <= rRange.start && rRange.start <= rRange.end && rRange.end <= m.rows );
                rows = rRange.size();
                offset += step * rRange.start;
            }

            // A column sub-range advances by elements and breaks continuity
            // whenever it is narrower than the parent.
            if( cRange == Range::all() )
                cols = m.cols;
            else
            {
                CV_Assert( 0 <= cRange.start && cRange.start <= cRange.end && cRange.end <= m.cols );
                cols = cRange.size();
                offset += cRange.start * elemSize();
                flags &= cols < m.cols ? ~Mat::CONTINUOUS_FLAG : -1;
            }

            // A single row is always continuous.
            if( rows == 1 )
                flags |= Mat::CONTINUOUS_FLAG;

            // Share the buffer with m.
            if( refcount )
                CV_XADD(refcount, 1);
            // Degenerate range collapses to an empty header.
            if( rows <= 0 || cols <= 0 )
                rows = cols = 0;
        }
Example #20
0
 // Assignment: shares m's buffer. The source counter is bumped before
 // release() so aliased storage cannot be freed mid-assignment.
 inline oclMat &oclMat::operator = (const oclMat &m)
 {
     if( &m != this )
     {
         int *rc = m.refcount;
         if( rc )
             CV_XADD(rc, 1);
         release();
         // Copy the complete header once our old data has been dropped.
         clCxt = m.clCxt;
         flags = m.flags;
         rows = m.rows;
         cols = m.cols;
         step = m.step;
         data = m.data;
         datastart = m.datastart;
         dataend = m.dataend;
         offset = m.offset;
         wholerows = m.wholerows;
         wholecols = m.wholecols;
         refcount = rc;
     }
     return *this;
 }
Example #21
0
 // Atomically add one owner to the shared reference counter.
 void incRef()
 {
     int *counter = &refCount;
     CV_XADD(counter, 1);
 }
Example #22
0
// Copy constructor: shallow copy that shares the device buffer and adds one
// owner to the counter when the source owns data.
pcl::gpu::DeviceMemory::DeviceMemory(const DeviceMemory& other_arg)
    : data_(other_arg.data_), sizeBytes_(other_arg.sizeBytes_), refcount_(other_arg.refcount_)
{
    int* rc = refcount_;
    if (rc != 0)
        CV_XADD(rc, 1);
}
Example #23
0
 // Minimal allocator hook: accepts any non-null UMatData and records the
 // extra UMat-side reference on it; actual memory is managed elsewhere.
 bool allocate(cv::UMatData* u, int /*accessFlags*/, cv::UMatUsageFlags /*usageFlags*/) const
 {
    if (u == 0)
       return false;
    CV_XADD(&u->urefcount, 1);
    return true;
 }
Example #24
0
// Copy constructor: shares other's implementation and registers one more
// owner on its reference counter.
cv::viz::Viz3d::Viz3d(const Viz3d& other) : impl_(other.impl_)
{
    if (impl_ != 0)
        CV_XADD(&impl_->ref_counter, 1);
}
Example #25
0
// Wrap this Mat's host data in a UMat header. ROI Mats are handled by
// wrapping the whole buffer and re-applying the rectangle on the result.
// Otherwise a temporary UMatData is created over the existing host pointer;
// its originalUMatData back-link (plus the two references taken below) ties
// its lifetime to ours — see ~UMatData() for the matching release.
UMat Mat::getUMat(int accessFlags, UMatUsageFlags usageFlags) const
{
    UMat hdr;
    if(!data)
        return hdr;
    if (data != datastart)
    {
        // This Mat is a ROI into a larger buffer: expand to the full matrix,
        // recurse, then crop the same rectangle out of the resulting UMat.
        Size wholeSize;
        Point ofs;
        locateROI(wholeSize, ofs);
        Size sz(cols, rows);
        if (ofs.x != 0 || ofs.y != 0)
        {
            Mat src = *this;
            int dtop = ofs.y;
            int dbottom = wholeSize.height - src.rows - ofs.y;
            int dleft = ofs.x;
            int dright = wholeSize.width - src.cols - ofs.x;
            src.adjustROI(dtop, dbottom, dleft, dright);
            return src.getUMat(accessFlags, usageFlags)(cv::Rect(ofs.x, ofs.y, sz.width, sz.height));
        }
    }
    CV_Assert(data == datastart);

    // Same limitation as getMat(): always request read-write access for now.
    accessFlags |= ACCESS_RW;
    UMatData* new_u = NULL;
    {
        // Wrap the existing host pointer with the Mat-side allocator.
        MatAllocator *a = allocator, *a0 = getDefaultAllocator();
        if(!a)
            a = a0;
        new_u = a->allocate(dims, size.p, type(), data, step.p, accessFlags, usageFlags);
    }
    bool allocated = false;
    try
    {
        // Try the UMat (device-side) allocator first; fall back below.
        allocated = UMat::getStdAllocator()->allocate(new_u, accessFlags, usageFlags);
    }
    catch (const cv::Exception& e)
    {
        fprintf(stderr, "Exception: %s\n", e.what());
    }
    if (!allocated)
    {
        // Fallback: host-side default allocator must succeed.
        allocated = getDefaultAllocator()->allocate(new_u, accessFlags, usageFlags);
        CV_Assert(allocated);
    }
    if (u != NULL)
    {
#ifdef HAVE_OPENCL
        if (ocl::useOpenCL() && new_u->currAllocator == ocl::getOpenCLAllocator())
        {
            CV_Assert(new_u->tempUMat());
        }
#endif
        // Link the temporary UMatData back to ours and take the two
        // references that ~UMatData() will drop when the wrapper dies.
        new_u->originalUMatData = u;
        CV_XADD(&(u->refcount), 1);
        CV_XADD(&(u->urefcount), 1);
    }
    hdr.flags = flags;
    setSize(hdr, dims, size.p, step.p);
    finalizeHdr(hdr);
    hdr.u = new_u;
    hdr.offset = 0; //data - datastart;
    hdr.addref();
    return hdr;
}
Example #26
0
 // Drop one owner; CV_XADD returns the previous count, so seeing 1 means we
 // released the last reference and must destroy the object.
 void decRef()
 {
     const int prev = CV_XADD(&refCount, -1);
     if (prev == 1)
         deleteSelf();
 }
// Copy constructor: shallow copy sharing the device buffer; adds one owner
// to the counter when the source owns data.
vm::scanner::cuda::DeviceMemory::DeviceMemory(const DeviceMemory& other_arg)
    : data_(other_arg.data_), sizeBytes_(other_arg.sizeBytes_), refcount_(other_arg.refcount_)
{
    int* rc = refcount_;
    if (rc != 0)
        CV_XADD(rc, 1);
}
// Copy constructor: shallow copy sharing the pitched device buffer; adds one
// owner to the counter when the source owns data.
vm::scanner::cuda::DeviceMemory2D::DeviceMemory2D(const DeviceMemory2D& other_arg) :
    data_(other_arg.data_), step_(other_arg.step_), colsBytes_(other_arg.colsBytes_),
    rows_(other_arg.rows_), refcount_(other_arg.refcount_)
{
    int* rc = refcount_;
    if (rc != 0)
        CV_XADD(rc, 1);
}
Example #29
0
 // Drop one owner; the caller whose decrement takes the count from 1 to 0
 // (CV_XADD returns the previous value) performs the deallocation.
 void release() {
     if (refcount != 0 && CV_XADD(refcount, -1) == 1)
         deallocate();
 }