Example no. 1
/*
 * Get Method Name (and Signature)
 *
 * For the method indicated by method, return the method name via
 * name_ptr and method signature via signature_ptr.
 *
 * REQUIRED Functionality.
 */
jvmtiError JNICALL
jvmtiGetMethodName(jvmtiEnv* env,
                   jmethodID method,
                   char** name_ptr,
                   char** signature_ptr,
                   char** generic_ptr)
{
    TRACE("GetMethodName called");
    SuspendEnabledChecker sec;
    /*
     * Check given env & current phase.
     */
    jvmtiPhase phases[] = {JVMTI_PHASE_START, JVMTI_PHASE_LIVE};

    CHECK_EVERYTHING();

    if( !method ) return JVMTI_ERROR_NULL_POINTER;
    // Any of name_ptr, signature_ptr, or generic_ptr may be NULL;
    // in that case the corresponding value is simply not returned

    char* mtd_name;
    char* mtd_sig;
    Method* mtd = reinterpret_cast<Method*>(method);
    jvmtiError err;
    if( name_ptr )
    {
        const String* name = mtd->get_name();
        err = _allocate(name->len + 1, (unsigned char**)(&mtd_name));
        if(err != JVMTI_ERROR_NONE)
            return err;
        // copy method name
        strcpy(mtd_name, name->bytes);
        *name_ptr = mtd_name;
    }

    if( signature_ptr )
    {
        const String* sig = mtd->get_descriptor();
        err = _allocate(sig->len + 1, (unsigned char**)(&mtd_sig));
        if(err != JVMTI_ERROR_NONE)
        {
            if(name_ptr && mtd_name)
                _deallocate((unsigned char*)mtd_name);
            return err;
        }
        // copy method signature
        strcpy(mtd_sig, sig->bytes);
        *signature_ptr = mtd_sig;
    }

    // ppervov: no generics support in VM as of yet
    if( generic_ptr )
        *generic_ptr = NULL;

    return JVMTI_ERROR_NONE;
}
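As a usage note (not part of the VM snippet above): the strings returned via name_ptr and signature_ptr are VM allocations, so a JVMTI agent is expected to release them with Deallocate. A minimal agent-side sketch, assuming only the standard jvmti.h C++ bindings (print_method is a hypothetical helper):

#include <cstdio>
#include <jvmti.h>

// Hypothetical agent-side caller of GetMethodName.
static void print_method(jvmtiEnv* jvmti, jmethodID method)
{
    char* name = NULL;
    char* sig = NULL;
    if (jvmti->GetMethodName(method, &name, &sig, NULL) != JVMTI_ERROR_NONE)
        return;
    printf("%s%s\n", name, sig);
    // the returned strings are VM-allocated and must be given back
    jvmti->Deallocate((unsigned char*)name);
    jvmti->Deallocate((unsigned char*)sig);
}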
Example no. 2
Buffer::Buffer(const Buffer& x)
{
    _rep = _allocate(x._rep->cap, x._minCap);
    memcpy(_rep->data, x._rep->data, x._rep->size);
    _rep->size = x._rep->size;
    _minCap=x._minCap;
}
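The Buffer snippets here and below (insert, _reserve_aux, operator=, _append_char_aux) all assume that _allocate returns a single heap block holding a header immediately followed by the payload, which is why a plain free(_rep) is enough elsewhere, and that a default-constructed Buffer points at a shared rep with cap == 0, hence the `if (_rep->cap != 0)` guards. A hypothetical sketch of that layout, with field names inferred from the calls above (not the library's actual code):

#include <cstdlib>

typedef unsigned int Uint32;

// Assumed representation: header and payload live in one malloc'd block.
struct BufferRep
{
    Uint32 cap;      // bytes available in data[]
    Uint32 size;     // bytes currently in use
    char   data[1];  // payload follows the header in the same block
};

static BufferRep* _allocate(Uint32 cap, Uint32 minCap)
{
    if (cap < minCap)
        cap = minCap;
    // one allocation covers header + payload, so free(_rep) releases both
    BufferRep* rep = (BufferRep*)malloc(sizeof(BufferRep) + cap);
    rep->cap = cap;
    rep->size = 0;
    return rep;
}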
Example no. 3
void Buffer::insert(Uint32 pos, const char* data, Uint32 size)
{
    if (pos > _rep->size)
        return;

    Uint32 cap = _rep->size + size;
    Uint32 rem = _rep->size - pos;

    if (cap > _rep->cap)
    {
        BufferRep* rep = _allocate(cap, _minCap);
        rep->size = cap;

        memcpy(rep->data, _rep->data, pos);
        memcpy(rep->data + pos, data, size);
        memcpy(rep->data + pos + size, _rep->data + pos, rem);

        if (_rep->cap != 0)
            free(_rep);

        _rep = rep;
    }
    else
    {
        memmove(_rep->data + pos + size, _rep->data + pos, rem);
        memcpy(_rep->data + pos, data, size);
        _rep->size += size;
    }
}
Example no. 4
//note: if inmatrix has different row and col counts from this matrix,
//this matrix is resized to the same row and col counts as inmatrix
Matrix& Matrix::operator=(Matrix& inmatrix)
{
	if( _err != MCO_SUCCESS)
		return *this;
		
	int32 inrow = inmatrix._row;
	int32 incol = inmatrix._col;	
	
	if(_row != inrow || _col != incol){

//modified on 8/12 to take out the smatrix class
		_row = inrow;
		_col = incol;
		_allocate();

		if(_err == MCO_SUCCESS){				
			for(int32 i = 0; i < _row; i++){
				for(int32 j = 0; j < _col; j++){
					_m[i][j] = inmatrix._m[i][j];
				}
			}		
		}
//end of modification		
	}
	
	else{
		for(int32 i = 0; i < _row; i++){
			for(int32 j = 0; j < _col; j++){
				_m[i][j] = inmatrix._m[i][j];
			}
		}
	}
	
	return *this;
}			
Example no. 5
/*
 * Get Bytecodes
 *
 * For the method indicated by method, return the byte codes that
 * implement the method. The number of bytecodes is returned via
 * bytecode_count_ptr. The byte codes themselves are returned via
 * bytecodes_ptr.
 *
 * OPTIONAL Functionality.
 */
jvmtiError JNICALL
jvmtiGetBytecodes(jvmtiEnv* env,
                  jmethodID method,
                  jint* bytecode_count_ptr,
                  unsigned char** bytecodes_ptr)
{
    TRACE("GetBytecodes called");
    SuspendEnabledChecker sec;
    /*
     * Check given env & current phase.
     */
    jvmtiPhase phases[] = {JVMTI_PHASE_START, JVMTI_PHASE_LIVE};

    CHECK_EVERYTHING();

    CHECK_CAPABILITY(can_get_bytecodes);

    /**
     * Check bytecode_count_ptr and bytecodes_ptr
     */
    if( !bytecode_count_ptr || !bytecodes_ptr ) {
        return JVMTI_ERROR_NULL_POINTER;
    }
    /**
     * Check method
     */
    if( !method ) {
        return JVMTI_ERROR_INVALID_METHODID;
    }

    Method* mtd = (Method*)method;
    if( mtd->is_native() ) return JVMTI_ERROR_NATIVE_METHOD;
    if( mtd->get_byte_code_addr() == NULL ) return JVMTI_ERROR_OUT_OF_MEMORY;

    *bytecode_count_ptr = mtd->get_byte_code_size();
    jvmtiError err = _allocate( *bytecode_count_ptr, bytecodes_ptr );
    if( err != JVMTI_ERROR_NONE ) return err;
    memcpy( *bytecodes_ptr, mtd->get_byte_code_addr(), *bytecode_count_ptr );

    if (interpreter_enabled())
    {
        TIEnv *p_env = (TIEnv *)env;
        VMBreakPoints* vm_brpt = p_env->vm->vm_env->TI->vm_brpt;

        LMAutoUnlock lock(vm_brpt->get_lock());

        for (VMBreakPoint* bpt = vm_brpt->find_method_breakpoint(method); bpt;
             bpt = vm_brpt->find_next_method_breakpoint(bpt, method))
        {
            (*bytecodes_ptr)[bpt->location] =
                (unsigned char)bpt->saved_byte;
        }
    }

    return JVMTI_ERROR_NONE;
}
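For completeness, an agent-side sketch (bytecode_size is a hypothetical helper): the copy handed back through bytecodes_ptr belongs to the agent and must be released with Deallocate.

#include <jvmti.h>

// Hypothetical agent helper: fetch the bytecode copy, release it,
// and report only its length.
static jint bytecode_size(jvmtiEnv* jvmti, jmethodID method)
{
    jint count = 0;
    unsigned char* code = NULL;
    if (jvmti->GetBytecodes(method, &count, &code) != JVMTI_ERROR_NONE)
        return -1;
    jvmti->Deallocate(code);
    return count;
}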
Example no. 6
void Buffer::_reserve_aux(Uint32 cap)
{
    if (_rep->cap == 0)
    {
        _rep = _allocate(cap, _minCap);
        _rep->size = 0;
    }
    else
        _rep = _reallocate(_rep, _next_pow_2(cap, _minCap));
}
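_reserve_aux grows by _next_pow_2(cap, _minCap) while _append_char_aux doubles, which suggests power-of-two growth clamped below by _minCap. A plausible sketch of that helper (assumed, not taken from the library):

typedef unsigned int Uint32;

// Assumed growth helper: round cap up to the next power of two,
// but never return less than minCap.
static Uint32 _next_pow_2(Uint32 cap, Uint32 minCap)
{
    Uint32 n = minCap;
    while (n < cap)
        n *= 2;
    return n;
}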
Example no. 7
PATCH_RET_CODE OpenNew( foff len )
{
    NewFile = _allocate( len );
    if( NewFile == NULL ) {
        //PatchError( ERR_USEREAL );
        return( PATCH_BAD_PATCH );
    }
    memset( NewFile, 0, len );
    return( PATCH_RET_OKAY );
}
Example no. 8
NeuralNetwork::NeuralNetwork(int nInputs,
                              int nHidden,
                              int nOutputs,
                              float learningRate_)
//                              float decreaseConstant_,
//                              float weightDecay_)
 : learningRate(learningRate_)
//, decreaseConstant(decreaseConstant_), weightDecay(weightDecay_)
{
  _allocate(nInputs, nHidden, nOutputs);
  init();
}
Example no. 9
Matrix::Matrix(Matrix& inmatrix)
{
	_err = MCO_SUCCESS;
	_row = inmatrix._row;
	_col = inmatrix._col;

	_allocate();
	if(_err == MCO_SUCCESS){		
		for(int32 i = 0; i < _row; i++)
			for(int32 j = 0; j < _col; j++)
				_m[i][j] = inmatrix._m[i][j];
	}		
}
Example no. 10
void Buffer::_append_char_aux()
{
    if (_rep->cap == 0)
    {
        _rep = _allocate(_minCap, _minCap);
        _rep->size = 0;
    }
    else
    {
        // Check for potential overflow.
        PEGASUS_CHECK_CAPACITY_OVERFLOW(_rep->cap);
        _rep = _reallocate(_rep, _rep->cap ? (2 * _rep->cap) : _minCap);
    }
}
Example no. 11
 // Ensure _size + required elements fit after _bias, compacting or
 // reallocating as needed.
 void reserve(std::size_t required)
 {
     if (_size == 0) {
         _bias = 0;
     }
     if (_bias + _size + required <= _capacity) {
         // enough room after the bias: nothing to do
     } else if (_size + required > _capacity || _size > _bias) {
         // won't fit even after compaction, or the live range would
         // overlap its destination: grab a fresh allocation
         _allocate(required);
     } else {
         // enough total capacity: slide the live range down to offset 0
         std::copy_n(_data + _bias, _size, _data);
         _bias = 0;
     }
 }
Example no. 12
 //--------------------------------------------------------------
 //
 void BinarizationFilter::setup(int width, int height, int internalformat) {
 //    cout << "[BinarizationFilter]_setup()" << endl;
     
     //--------------------------------------
     // shader
     _shader.load(
                   "shaders/binarization/binarization.vert",
                   "shaders/binarization/binarization.frag"
     );
     //--------------------------------------
     
     threshold(_threshold);
     
     _allocate(width, height, internalformat);
 }
Example no. 13
PATCH_RET_CODE InitHoles( void )
{
    NumHoles = 0;
    HoleArraySize = (64*1024L) / sizeof( save_hole ) - 1;
    for( ;; ) {
        HoleArray = _allocate( HoleArraySize*sizeof(save_hole) );
        if( HoleArray != NULL ) break;
        HoleArraySize /= 2;
        if( HoleArraySize < 100 ) {
            PatchError( ERR_MEMORY_OUT );
            return( PATCH_NO_MEMORY );
        }
    }
    return( PATCH_RET_OKAY );
}
Example no. 14
McoStatus Matrix::set(int32 rows, int32 cols)
{
	_deallocate();
	_err = MCO_SUCCESS;
	
	if( rows <= 0 || cols <= 0){
		_err = MCO_FAILURE;
		return _err;
	}

	_row = rows;
	_col = cols;
	_allocate();
	
	return _err;
}
Example no. 15
Matrix::Matrix(int32 rows)
{
	_err = MCO_SUCCESS;

	if( rows <= 0 ){
		_err = MCO_FAILURE;
		return;
	}
 
	_row = _col = rows;
	_allocate();
	if(_err == MCO_SUCCESS){		
		for(int32 i = 0; i < _row; i++)
			for(int32 j = 0; j < _col; j++)
				_m[i][j] = 0;
	}		
}
Example no. 16
void __Init_Argv()
{
    _argv = (char **) _allocate( 2 * sizeof( char * ) );
    _argv[0] = _LpPgmName;      /* fill in program name */
    _argc = _make_argv( _LpCmdLine, &_argv );
    _argv[_argc] = NULL;
    ___Argc = _argc;
    ___Argv = _argv;
}
Example no. 17
Buffer& Buffer::operator=(const Buffer& x)
{
    if (&x != this)
    {
        if (x._rep->size > _rep->cap)
        {
            if (_rep->cap != 0)
                free(_rep);

            _rep = _allocate(x._rep->cap, x._minCap);
        }

        memcpy(_rep->data, x._rep->data, x._rep->size);
        _rep->size = x._rep->size;
        _minCap = x._minCap;
    }
    return *this;
}
Example no. 18
Matrix::Matrix(int32 rows, int32 cols, double val)
{
	_err = MCO_SUCCESS;

	if( rows <= 0 || cols <= 0){
		_err = MCO_FAILURE;
		return;
	}

	_row = rows;
	_col = cols;
	
	_allocate();
	if(_err == MCO_SUCCESS){		
		for(int32 i = 0; i < _row; i++)
			for(int32 j = 0; j < _col; j++)
				_m[i][j] = val;
	}		
}
Example no. 19
//
// Allocate virtual buffer, potentially assembling it from several physical
// buffers.
//
ali_errnum_e MPFVTP::bufferAllocate( btWSSize             Length,
                                     btVirtAddr          *pBufferptr,
                                     NamedValueSet const &rInputArgs,
                                     NamedValueSet       &rOutputArgs )
{
   AutoLock(this);

   AAL_DEBUG(LM_AFU, "Trying to allocate virtual buffer of size " << std::dec << Length << std::endl);

   btBool ret;
   void *pRet;                      // for error checking
   ali_errnum_e err;

   // FIXME: Input/OutputArgs are ignored here...

   // Round request size to proper page size
   // If the tail (= remainder of Length that doesn't fill a large buffer)
   // is large enough, extend Length to fit large buffers. Otherwise, make sure
   // it at least fills 4k pages.
   size_t tail = Length % LARGE_PAGE_SIZE;
   AAL_DEBUG(LM_AFU, "tail: " << std::dec << tail << std::endl);
   if (tail > CCI_MPF_VTP_LARGE_PAGE_THRESHOLD) {
      // if tail is large enough, align with large page size
      Length = (Length + LARGE_PAGE_SIZE - 1) & LARGE_PAGE_MASK;
      tail = 0;
   } else {
      // otherwise, align with small page size
      Length = (Length + SMALL_PAGE_SIZE - 1) & SMALL_PAGE_MASK;
      tail = Length % LARGE_PAGE_SIZE;
   }
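   // Worked example (illustrative only, assuming 2 MB large pages and 4 KB
   // small pages): a 5 MB request leaves tail = 1 MB. If that exceeds the
   // threshold, Length is rounded up to 6 MB (tail becomes 0, giving three
   // large buffers); otherwise Length stays at 5 MB, giving two large
   // buffers plus 256 small ones for the 1 MB tail.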
   size_t nLargeBuffers = Length / LARGE_PAGE_SIZE;
   size_t nSmallBuffers = (Length % LARGE_PAGE_SIZE) / SMALL_PAGE_SIZE;
   MPF_ASSERT_RET( Length % SMALL_PAGE_SIZE == 0, ali_errnumNoMem );

   AAL_DEBUG(LM_AFU, "padded Length: " << std::dec << Length << std::endl);
   AAL_DEBUG(LM_AFU, std::dec << nLargeBuffers << " large and " << nSmallBuffers << " small buffers" << std::endl);

   // Map a region of the requested size.  This will reserve a virtual
   // memory address space.  As pages are allocated they will be
   // mapped into this space.
   //
   // An extra page is added to the request in order to enable alignment
   // of the base address.  Linux is only guaranteed to return 4 KB aligned
   // addresses and we want large page aligned virtual addresses.
   // TODO: Assumption is still that virtual buffer needs to be large-page
   //        (2MB) aligned, even smaller ones. Make that configurable.
   void* va_base;
   size_t va_base_len = Length + LARGE_PAGE_SIZE;
   va_base = mmap(NULL, va_base_len,
                  PROT_READ | PROT_WRITE,
                  MAP_SHARED | MAP_ANONYMOUS, -1, 0);
   MPF_ASSERT_RET(va_base != MAP_FAILED, ali_errnumNoMem);
   AAL_DEBUG(LM_AFU, "va_base " << std::hex << std::setw(2) << std::setfill('0') << va_base << std::endl);

   void* va_aligned = (void*)((size_t(va_base) + LARGE_PAGE_SIZE - 1) & LARGE_PAGE_MASK);
   AAL_DEBUG(LM_AFU, "va_aligned " << std::hex << std::setw(2) << std::setfill('0') << va_aligned << std::endl);

   // Trim off the unnecessary extra space after alignment
   size_t trim = LARGE_PAGE_SIZE - (size_t(va_aligned) - size_t(va_base));
   AAL_DEBUG(LM_AFU, "va_base_len trimmed by " << std::hex << std::setw(2) << std::setfill('0') << trim << " to " << va_base_len - trim << std::endl);
   pRet = mremap(va_base, va_base_len, va_base_len - trim, 0);
   MPF_ASSERT_RET(va_base == pRet, ali_errnumNoMem);
   va_base_len -= trim;

   // start at the end of the virtual buffer and work backwards
   // start with small buffers until we are done or hit a large buffer
   // alignment boundary. Then continue with large buffers. If a large buffer
   // allocation fails, fall back to small pages.
   // TODO: make large page allocation threshold configurable

   void * va_alloc = (void *)(size_t(va_aligned) + Length);

   // Flags to indicate first/last page in an allocated region, stored in
   // the page table
   uint32_t pt_flags = MPFVTP_PT_FLAG_ALLOC_END;

   // -------------------------------------------------------------------------
   // small buffer allocation loop
   // -------------------------------------------------------------------------
   // Run to allocate small buffers until we can cover the remaining space with
   // large buffers.
   while ((size_t(va_alloc) & ( LARGE_PAGE_SIZE-1 )) != 0) {

      va_alloc = (void *)(size_t(va_alloc) - SMALL_PAGE_SIZE);

      // Shrink the reserved area in order to make a hole in the virtual
      // address space.
      pRet = mremap(va_base, va_base_len, va_base_len - SMALL_PAGE_SIZE, 0);
      MPF_ASSERT_RET(va_base == pRet, ali_errnumNoMem);
      va_base_len -= SMALL_PAGE_SIZE;

      // allocate buffer
      if (Length <= SMALL_PAGE_SIZE) {
         pt_flags |= MPFVTP_PT_FLAG_ALLOC_START;
      }
      err = _allocate((btVirtAddr)va_alloc, SMALL_PAGE_SIZE, pt_flags);
      if (err != ali_errnumOK) {
         AAL_ERR(LM_AFU, "Unable to allocate buffer. Err: " << err);
         return err;
         // FIXME: leaking already allocated pages!
      }

      pt_flags = 0;
      Length -= SMALL_PAGE_SIZE;
   }

   AAL_DEBUG(LM_AFU, "len remaining: " << std::dec << Length << std::endl);


   // -------------------------------------------------------------------------
   // large buffer allocation loop
   // -------------------------------------------------------------------------
   // Run for the remaining space, which should be an integer multiple of the
   // large buffer size in size, and aligned to large buffer boundaries. If
   // large buffer allocation fails, fall back to small buffers.
   size_t effPageSize = LARGE_PAGE_SIZE;     // page size used for actual allocations

   while (Length > 0) {

      va_alloc = (void *)(size_t(va_alloc) - effPageSize);

      // Shrink the reserved area in order to make a hole in the virtual
      // address space. If this is the last buffer to allocate, unmap reserved
      // area.
      if (va_base_len == effPageSize) {
         munmap(va_base, va_base_len);
         va_base_len = 0;
      } else {
         pRet = mremap(va_base, va_base_len, va_base_len - effPageSize, 0);
         MPF_ASSERT_RET(va_base == pRet, ali_errnumNoMem);
         va_base_len -= effPageSize;
      }

      // allocate buffer
      if (Length <= effPageSize) {
         pt_flags |= MPFVTP_PT_FLAG_ALLOC_START;
      }
      err = _allocate((btVirtAddr)va_alloc, effPageSize, pt_flags);
      if (err != ali_errnumOK) {
         if (effPageSize == LARGE_PAGE_SIZE) {
            // fall back to small buffers:
            // restore last large mapping
            if (va_base_len == 0) {
               // corner case: this was the last mapping - we destroyed it, so
               // try to restore it.
               va_base = mmap(va_alloc, LARGE_PAGE_SIZE,
                              PROT_READ | PROT_WRITE,
                              MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
               MPF_ASSERT_RET(va_base == va_alloc, ali_errnumNoMem);
            } else {
               // this was not the last mapping (or va_base is not aligned), so
               // we still have a valid reserved space. Just resize it back up.
               pRet = mremap(va_base, va_base_len, va_base_len + LARGE_PAGE_SIZE, 0);
               MPF_ASSERT_RET(pRet == va_base, ali_errnumNoMem);
            }
            va_base_len += LARGE_PAGE_SIZE;
            va_alloc = (void *)(size_t(va_alloc) + LARGE_PAGE_SIZE);
            effPageSize = SMALL_PAGE_SIZE;
            continue;    // try again with small buffers
         } else {
            // already using small buffers, nowhere to fall back to.
            AAL_ERR(LM_AFU, "Unable to allocate buffer. Err: " << err);
            return err;
            // FIXME: leaking already allocated pages!
         }
      }

      // mapping successful, on to the next
      pt_flags = 0;
      Length -= effPageSize;
   }

   // clean up
   if (va_base_len != 0)
   {
       munmap(va_base, va_base_len);
   }

#if defined(ENABLE_DEBUG) && (0 != ENABLE_DEBUG)
   ptDumpPageTable();
#endif

   *pBufferptr = (btVirtAddr)va_aligned;
   return ali_errnumOK;
}
Example no. 20
   // PD_TRACE_DECLARE_FUNCTION ( SDB__DPSRPCMGR_PREPAGES, "_dpsReplicaLogMgr::preparePages" )
   INT32 _dpsReplicaLogMgr::preparePages ( dpsMergeInfo &info )
   {
      INT32 rc = SDB_OK ;
      PD_TRACE_ENTRY ( SDB__DPSRPCMGR_PREPAGES );
      info.getDummyBlock().clear() ;
      dpsMergeBlock &block = info.getMergeBlock () ;
      block.pageMeta().clear() ;
      dpsLogRecordHeader &head = block.record().head() ;
      UINT32 logFileSz = _logger.getLogFileSz() ;
      BOOLEAN locked = FALSE ;

      if ( _totalSize < head._length )
      {
         PD_LOG ( PDERROR, "dps total memory size[%d] less than block size[%d]",
                  _totalSize, head._length ) ;
         rc = SDB_SYS ;
         SDB_ASSERT ( 0, "system error" ) ;
         goto error ;
      }

      if ( FALSE == _restoreFlag )
      {
         _mtx.get();
         locked = TRUE ;
      }

      if ( block.isRow() && _lsn.offset != head._lsn)
      {
         PD_LOG( PDERROR, "lsn[%lld] of row is not equal to lsn[%lld] of local",
                 head._lsn, _lsn.offset) ;
         rc = SDB_SYS ;
         goto error ;
      }

      if ( DPS_INVALID_LSN_VERSION == _lsn.version )
      {
         ++_lsn.version ;
      }

      if ( !block.isRow() )
      {
         if ( ( _lsn.offset / logFileSz ) !=
              ( _lsn.offset + head._length - 1 ) / logFileSz )
         {
            SDB_ASSERT ( !block.isRow(), "replicated log record should never"
                         " hit this part" ) ;
            UINT32 dummyLogSize = logFileSz - ( _lsn.offset % logFileSz ) ;
            SDB_ASSERT ( dummyLogSize >= sizeof ( dpsLogRecordHeader ),
                         "dummy log size is smaller than log head" ) ;
            SDB_ASSERT ( dummyLogSize % sizeof(SINT32) == 0,
                         "dummy log size is not 4 bytes aligned" ) ;

            dpsLogRecordHeader &dummyhead =
                              info.getDummyBlock().record().head() ;
            dummyhead._length = dummyLogSize ;
            dummyhead._type   = LOG_TYPE_DUMMY ;
            _allocate ( dummyhead._length, info.getDummyBlock().pageMeta() ) ;

            SHARED_LOCK_NODES ( info.getDummyBlock().pageMeta() ) ;
            _push2SendQueue ( info.getDummyBlock().pageMeta() ) ;
            dummyhead._lsn = _lsn.offset ;
            dummyhead._version = _lsn.version ;
            dummyhead._preLsn = _currentLsn.offset ;
            _currentLsn = _lsn ;
            _lsn.offset += dummyhead._length ;

            if ( info.isNeedNotify() && _pEventHander )
            {
               _pEventHander->onPrepareLog( info.getCSLID(), info.getCLLID(),
                                            info.getExtentLID(),
                                            dummyhead._lsn ) ;
            }
         }

         if ( ( (_lsn.offset+head._length) / logFileSz ) !=
              ( (_lsn.offset+head._length+
                 sizeof(dpsLogRecordHeader)) / logFileSz ) )
         {
            SDB_ASSERT ( !block.isRow(), "replicated log record should never"
                         " hit this part" ) ;
            head._length = logFileSz - _lsn.offset % logFileSz ;
         }
      }
      _allocate( head._length, block.pageMeta() );

      SHARED_LOCK_NODES( block.pageMeta() ) ;
      _push2SendQueue( block.pageMeta() ) ;
      if ( !block.isRow() )
      {
         head._lsn = _lsn.offset;
         head._preLsn = _currentLsn.offset ;
         head._version = _lsn.version ;
      }
      else
      {
         SDB_ASSERT ( _lsn.offset == head._lsn, "row lsn error" ) ;
         _lsn.version = head._version ;
      }
      _currentLsn = _lsn ;
      _lsn.offset += head._length ;

      if ( info.isNeedNotify() && _pEventHander )
      {
         _pEventHander->onPrepareLog( info.getCSLID(), info.getCLLID(),
                                      info.getExtentLID(),
                                      head._lsn ) ;
      }

   done:
      if ( locked )
      {
         _mtx.release();
         locked = FALSE ;
      }
      PD_TRACE_EXITRC ( SDB__DPSRPCMGR_PREPAGES, rc );
      return rc;
   error:
      goto done;
   }
Example no. 21
Buffer::Buffer(const char* data, Uint32 size, Uint32 minCap): _minCap(minCap)
{
    _rep = _allocate(size, _minCap);
    _rep->size = size;
    memcpy(_rep->data, data, size);
}
Example no. 22
static int _make_argv( char *p, char ***argv )
{
    int             argc;
    char            *start;
    char            *new_arg;
    char            wildcard;
    char            lastchar;
    DIR             *dir;
    struct dirent   *dirent;
    char            drive[_MAX_DRIVE];
    char            directory[_MAX_DIR];
    char            name[_MAX_FNAME];
    char            extin[_MAX_EXT];
    char            pathin[_MAX_PATH];

    argc = 1;
    for(;;) {
        while( *p == ' ' ) ++p;         /* skip over blanks */
        if( *p == '\0' ) break;
        /* we are at the start of a parm */
        wildcard = 0;
        if( *p == '\"' ) {
            p++;
            new_arg = start = p;
            for(;;) {
                /* end of parm: NULLCHAR or quote */
                if( *p == '\"' ) break;
                if( *p == '\0' ) break;
                if( *p == '\\' ) {
                    if( p[1] == '\"'  ||  p[1] == '\\' )  ++p;
                }
                *new_arg++ = *p++;
            }
        } else {
            new_arg = start = p;
            for(;;) {
                /* end of parm: NULLCHAR or blank */
                if( *p == '\0' ) break;
                if( *p == ' ' ) break;
                if(( *p == '\\' )&&( p[1] == '\"' )) {
                    ++p;
                } else if( *p == '?'  ||  *p == '*' ) {
                    wildcard = 1;
                }
                *new_arg++ = *p++;
            }
        }
        *argv = (char **) realloc( *argv, (argc+2) * sizeof( char * ) );
        if( *argv == NULL )  _Not_Enough_Memory();
        (*argv)[ argc ] = start;
        ++argc;
        lastchar = *p;
        *new_arg = '\0';
        ++p;
        if( wildcard ) {
            /* expand file names */
            dir = opendir( start );
            if( dir != NULL ) {
                --argc;
                _splitpath( start, drive, directory, name, extin );
                for(;;) {
                    dirent = readdir( dir );
                    if( dirent == NULL ) break;
                    if( dirent->d_attr &
                        (_A_HIDDEN+_A_SYSTEM+_A_VOLID+_A_SUBDIR) ) continue;
                    _splitpath( dirent->d_name, NULL, NULL, name, extin );
                    _makepath( pathin, drive, directory, name, extin );
                    *argv = (char **) realloc( *argv, (argc+2) * sizeof( char * ) );
                    if( *argv == NULL )  _Not_Enough_Memory();
                    new_arg = (char *) _allocate( strlen( pathin ) + 1 );
                    strcpy( new_arg, pathin );
                    (*argv)[argc++] = new_arg;
                }
                closedir( dir );
            }
        }
        if( lastchar == '\0' ) break;
    }
    return( argc );
}
Example no. 23
/*
 * Get Local Variable Table
 *
 * Return local variable information.
 *
 * OPTIONAL Functionality.
 */
jvmtiError JNICALL
jvmtiGetLocalVariableTable(jvmtiEnv* env,
                           jmethodID method,
                           jint* entry_count_ptr,
                           jvmtiLocalVariableEntry** table_ptr)
{
    TRACE("GetLocalVariableTable called");
    SuspendEnabledChecker sec;
    int len,
        index,
        count;
    char *pointer;
    Method *method_ptr;
    jvmtiError result;

    /*
     * Check given env & current phase.
     */
    jvmtiPhase phases[] = {JVMTI_PHASE_LIVE};

    CHECK_EVERYTHING();

    CHECK_CAPABILITY(can_access_local_variables);

    /**
     * Check entry_count_ptr and table_ptr
     */
    if( !entry_count_ptr || !table_ptr ) {
        return JVMTI_ERROR_NULL_POINTER;
    }
    /**
     * Check method
     */
    if( !method ) {
        return JVMTI_ERROR_INVALID_METHODID;
    } else if( ((Method*)method)->is_native() ) {
        return JVMTI_ERROR_NATIVE_METHOD;
    }

    /**
     * Get the number of entries in the method's local variable table
     */
    method_ptr = (Method*)method;
    count = method_ptr->get_local_var_table_size();
    if( count == 0 ) {
        return JVMTI_ERROR_ABSENT_INFORMATION;
    }

    /**
     * Allocate memory for local variable table
     */
    *entry_count_ptr = count;
    result = _allocate( count * sizeof(jvmtiLocalVariableEntry),
                        (unsigned char**)table_ptr );
    if( result != JVMTI_ERROR_NONE ) {
        return result;
    }

    /**
     * Set local variable table
     */
    for( index = 0; index < count; index++)
    {
        String *name, *type, *generic_type;
        jvmtiLocalVariableEntry* entry = *table_ptr + index;
        method_ptr->get_local_var_entry(index,
            &(entry->start_location),
            &(entry->length),
            &(entry->slot),
            &name, &type, &generic_type);
        // allocate memory for name
        len = get_utf8_length_of_8bit( (const U_8*)name->bytes, name->len);
        result = _allocate( len + 1, (unsigned char**)&pointer );
        if( result != JVMTI_ERROR_NONE ) {
            return result;
        }
        // copy variable name
        utf8_from_8bit( pointer, (const U_8*)name->bytes, name->len);
        // set variable name
        entry->name = pointer;
        // allocate memory for signature
        len = get_utf8_length_of_8bit( (const U_8*)type->bytes, type->len);
        result = _allocate( len + 1, (unsigned char**)&pointer );
        if( result != JVMTI_ERROR_NONE ) {
            return result;
        }
        // copy variable signature
        utf8_from_8bit( pointer, (const U_8*)type->bytes, type->len);
        // set variable signature
        entry->signature = pointer;

        if (generic_type) {
            // allocate memory for generic_signature
            len = get_utf8_length_of_8bit( (const U_8*)generic_type->bytes, generic_type->len);
            result = _allocate( len + 1, (unsigned char**)&pointer );
            if( result != JVMTI_ERROR_NONE ) {
                return result;
            }
            // copy variable generic_signature
            utf8_from_8bit( pointer, (const U_8*)generic_type->bytes, generic_type->len);
            // set variable generic_signature
            entry->generic_signature = pointer;
        } else {
            entry->generic_signature = NULL;
        }
    }

    return JVMTI_ERROR_NONE;
} // jvmtiGetLocalVariableTable
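Since the function above makes one allocation for the table and one per string, an agent has to unwind them all. A sketch of the matching cleanup (free_local_var_table is a hypothetical helper, following the standard JVMTI ownership rules):

#include <jvmti.h>

// Hypothetical cleanup helper: every name/signature string plus the table
// itself is a separate VM allocation and must be deallocated individually.
static void free_local_var_table(jvmtiEnv* jvmti,
                                 jint count,
                                 jvmtiLocalVariableEntry* table)
{
    for (jint i = 0; i < count; i++) {
        jvmti->Deallocate((unsigned char*)table[i].name);
        jvmti->Deallocate((unsigned char*)table[i].signature);
        if (table[i].generic_signature != NULL)
            jvmti->Deallocate((unsigned char*)table[i].generic_signature);
    }
    jvmti->Deallocate((unsigned char*)table);
}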
Example no. 24
/*
 * Get Line Number Table
 *
 * For the method indicated by method, return a table of source
 * line number entries. The size of the table is returned via
 * entry_count_ptr and the table itself is returned via table_ptr.
 *
 * OPTIONAL Functionality.
 */
jvmtiError JNICALL
jvmtiGetLineNumberTable(jvmtiEnv* env,
                        jmethodID method,
                        jint* entry_count_ptr,
                        jvmtiLineNumberEntry** table_ptr)
{
    TRACE("GetLineNumberTable called");
    SuspendEnabledChecker sec;
    int index,
        count;
    Method *method_ptr;
    jvmtiError result;

    /*
     * Check given env & current phase.
     */
    jvmtiPhase phases[] = {JVMTI_PHASE_START, JVMTI_PHASE_LIVE};

    CHECK_EVERYTHING();

    CHECK_CAPABILITY(can_get_line_numbers);

    /**
     * Check entry_count_ptr and table_ptr
     */
    if( !entry_count_ptr || !table_ptr ) {
        return JVMTI_ERROR_NULL_POINTER;
    }
    /**
     * Check method
     */
    if( !method ) {
        return JVMTI_ERROR_INVALID_METHODID;
    } else if( ((Method*)method)->is_native() ) {
        return JVMTI_ERROR_NATIVE_METHOD;
    }

    /**
     * Get the number of entries in the method's line number table
     */
    method_ptr = (Method*)method;
    count = method_ptr->get_line_number_table_size();
    if( count == 0 ) {
        return JVMTI_ERROR_ABSENT_INFORMATION;
    }

    /**
     * Allocate memory for line number table
     */
    *entry_count_ptr = count;
    result = _allocate( count * sizeof(jvmtiLineNumberEntry),
                        (unsigned char**)table_ptr );
    if( result != JVMTI_ERROR_NONE ) {
        return result;
    }

    /**
     * Set line number table
     */
    for( index = 0; index < count; ++index)
    {
        jvmtiLineNumberEntry* entry = *table_ptr + index;
        method_ptr->get_line_number_entry(index,
            &(entry->start_location),
            &(entry->line_number));
    }

    return JVMTI_ERROR_NONE;
} // jvmtiGetLineNumberTable
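A short agent-side sketch (dump_line_numbers is a hypothetical helper): here the table is a single allocation, so one Deallocate suffices.

#include <cstdio>
#include <jvmti.h>

// Hypothetical agent helper: print each bytecode-index/line pair,
// then release the single block allocated for the table.
static void dump_line_numbers(jvmtiEnv* jvmti, jmethodID method)
{
    jint count = 0;
    jvmtiLineNumberEntry* table = NULL;
    if (jvmti->GetLineNumberTable(method, &count, &table) != JVMTI_ERROR_NONE)
        return;
    for (jint i = 0; i < count; i++)
        printf("bci %lld -> line %d\n",
               (long long)table[i].start_location, table[i].line_number);
    jvmti->Deallocate((unsigned char*)table);
}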
Example no. 25
	int32 Wire::connect(Block* b1, Pin* p1, Block* b2, Pin* p2)
	{
		Pin::SubType* st1 = NULL, * st2 = NULL;
		Block* tmp_b1 = NULL, * tmp_b2 = NULL;

		int32 ret = 0;
		char tmp[4096];

		// check if both pins are already connected
		if (p1->isConnectedTo(p2)) {
			_error_string = "pins already connected to each other";
			return FAILURE;
		}

		// check pins directions
		if (p1->getDirection() == p2->getDirection()) {
			if (p1->getDirection() != Pin::DIR_IO) {
				_error_string = "both pins have same direction";
				return FAILURE;
			}
		}

		// check data flavour
		if ((p1->getDataType() != DT_UNDEF && p1->getDataType() != DT_BYTES) &&
			(p2->getDataType() != DT_UNDEF && p2->getDataType() != DT_BYTES)) {
			if (p1->getDataType() != p2->getDataType()) {
				snprintf(tmp, sizeof(tmp),
					"incompatible data flavours: %s != %s",
					Pin::getDataTypeString(p1->getDataType()),
					Pin::getDataTypeString(p2->getDataType()));

				_error_string = tmp;
				return FAILURE;
			}
		}

		// check sub types
		st1 = p1->getSubType();
		st2 = p2->getSubType();

		if (st1->vf != VF_UNDEF && st2->vf != VF_UNDEF) {
			if (st1->vf != st2->vf) {
				switch (p1->getDataType()) {
				case DT_VIDEO:
					snprintf(tmp, sizeof(tmp),
						"incompatible sub types: %s != %s",
						VideoFormats::getVideoFormatString(st1->vf),
						VideoFormats::getVideoFormatString(st2->vf));
					break;
				case DT_AUDIO:
					snprintf(tmp, sizeof(tmp),
						"incompatible sub types: %s != %s",
						AudioFormats::getAudioFormatString(st1->af),
						AudioFormats::getAudioFormatString(st2->af));
					break;
				case DT_TEXT:
					snprintf(tmp, sizeof(tmp),
						"incompatible sub types: %s != %s",
						TextFormats::getTextFormatString(st1->tf),
						TextFormats::getTextFormatString(st2->tf));
					break;
				case DT_BYTES:
					snprintf(tmp, sizeof(tmp),
						"incompatible sub types: %s != %s",
						ByteFormats::getByteFormatString(st1->bf),
						ByteFormats::getByteFormatString(st2->bf));
					break;
				default:
					snprintf(tmp, sizeof(tmp),
						"strange, subtype set for generic pins.");
				}

				_error_string = tmp;
				return FAILURE;
			}
		}

		// connect the pins to this wire swapping them if needed
		if (p1->getDirection() == Pin::DIR_INPUT ||
			p2->getDirection() == Pin::DIR_OUTPUT) {
			// DEBUG
			UOSUTIL_DOUT(("Wire::connect(): swapping pin %s and pin %s\n",
				p1->getName(), p2->getName()));

			tmp_b1 = b2; _p1 = p2;
			tmp_b2 = b1; _p2 = p1;
		} else {
			tmp_b1 = b1; _p1 = p1;
			tmp_b2 = b2; _p2 = p2;
		}

		// associate this wire to each pin
		ret = _p1->_wires.pput(_p2->getAbsoluteName(), (char *) this);
		if (ret != SUCCESS) {
			_error_string = "cannot register wire for peer pin2";
			return FAILURE;
		}

		ret = _p2->_wires.pput(_p1->getAbsoluteName(), (char *) this);
		if (ret != SUCCESS) {
			_error_string = "cannot register wire for peer pin1";
			return FAILURE;
		}

		// connect the pins to each other
		ret = _p1->connect(tmp_b2, _p2);
		if (ret == FAILURE) {
			_error_string = "cannot connect pin1 to pin2";
			return FAILURE;
		}

		ret = _p2->connect(tmp_b1, _p1);
		if (ret == FAILURE) {
			_error_string = "cannot connect pin2 to pin1";
			return FAILURE;
		}

		// allocate buffers
		ret = _allocate();
		if (ret == FAILURE)
			return FAILURE;

		// ok
		return SUCCESS;
	}
Example no. 26
 //--------------------------------------------------------------
 //
 void BinarizationFilter::resize(int width, int height, int internalformat) {
     _allocate(width, height, internalformat);
 }
Example no. 27
// Simple allocations
TEST(id_manager, allocate) {
    AgentServerIdManager<std::bitset<SUBSCRIPTION_ID_SPACE_SIZE>> mgr;
    _allocate(mgr, 1);
}
Example no. 28
// Simple allocations
// With initial minimum id set to some value
TEST(id_manager, allocate_with_min) {
    AgentServerIdManager<std::bitset<INTERNAL_SUBSCRIPTION_ID_SPACE_SIZE>>
                                        mgr(INTERNAL_SUBSCRIPTION_ID_SPACE_MIN);

    _allocate(mgr, INTERNAL_SUBSCRIPTION_ID_SPACE_MIN);
}
Example no. 29
void CGRVertexBuffer::activate()
{
    if (mActivated)
    {
        CCLogger::LogError("CGRVertexBuffer::activate()", "Vertex buffer already activated");
        return;
    }
    
    // First, calculate the size of the buffer. Currently assume everything is a float array
    U32 totalSize = 0;
    U32 elementSize = 0;
    for (std::vector<BufferInfoBase*>::iterator it = mBuffers.begin(); it != mBuffers.end();  it++)
    {
        totalSize += (*it)->compCount*(*it)->elementCount;
        elementSize += (*it)->compCount;
    }
    
    // Allocate our storage buffer
    mVertexArray = new F32[totalSize];
    
    // Loop through all the elements and interleave the attributes: the result
    // is an array of elements, each holding one entry per attribute.
    // Example: Position0, UV0, Normal0, Position1, UV1, Normal1, etc.
    for (U32 element = 0; element < mElementCount; element++)
    {
        U32 offset = 0;
        for (std::vector<BufferInfoBase*>::iterator it = mBuffers.begin(); it != mBuffers.end();  it++)
        {
            // Copy in the data
            memcpy(&mVertexArray[element*elementSize+offset], &((F32*)(*it)->data)[element*(*it)->compCount], sizeof(F32)*(*it)->compCount);
            offset += (*it)->compCount;
        }
    }
    
    // Set up the bind infos for each vertex attribute so the GPU knows how to access the data
    U32 offset = 0;
    for (std::vector<BufferInfoBase*>::iterator it = mBuffers.begin(); it != mBuffers.end();  it++)
    {
        BindInfo bind;
        bind.bindLoc = (*it)->attribBind;
        bind.offset = offset*sizeof(F32);
        bind.stride = (elementSize)*sizeof(F32);
        bind.compSize = (*it)->compSize;
        bind.compCount = (*it)->compCount;
        mBindInfos.push_back(bind);
        offset += (*it)->compCount;
    }
    
    // Tell our GPU overlords to submit this buffer or whatever
    _allocate(totalSize*sizeof(F32));
    
    // We are finished with our buffer infos. Delete them to release the references
    for (std::vector<BufferInfoBase*>::iterator it = mBuffers.begin(); it != mBuffers.end(); it++)
        delete (*it);
    mBuffers.clear();
    
    // If we are using VBOs, the vertex data is on the GPU, so we don't need our local copy
    if (mUseVBOs)
    {
        delete [] mVertexArray;
        mVertexArray = NULL;
    }
    
    // We are activated
    mActivated = true;
}