Example #1
File: ByteArray.cpp Project: vkolev/flappy
ByteArray::ByteArray(const QuickVec<unsigned char>  &inValue){
    // Copy the QuickVec contents into a freshly malloc'd buffer and keep it as mValue.
    int i;
    unsigned char *val = (unsigned char*)malloc(inValue.size());
    for(i = 0; i < inValue.size(); i++)
        val[i] = inValue[i];
    mValue = (struct _value*)val;
    size = inValue.size();
}
Example #2
	size_t onData( void *inBuffer, size_t inItemSize, size_t inItems)
	{
		size_t size = inItemSize*inItems;
		if (size>0)
		{
			int s = mBytes.size();
			mBytes.resize(s+size);
			memcpy(&mBytes[s],inBuffer,size);
		}
		return inItems;
	}
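This onData callback follows the usual write-callback contract (as used by libcurl-style APIs): the chunk size is inItemSize*inItems, the bytes are appended to a growing buffer, and the item count is returned to signal that everything was consumed. A minimal self-contained sketch of the same accumulator pattern, using std::vector as a stand-in for the project's QuickVec:

#include <cstring>
#include <vector>

// Growing byte sink fed by repeated write callbacks (std::vector standing in for QuickVec).
struct ByteSink
{
    std::vector<unsigned char> bytes;

    // Same contract as onData above: append inItemSize*inItems bytes, report items consumed.
    size_t onData(const void *inBuffer, size_t inItemSize, size_t inItems)
    {
        size_t size = inItemSize * inItems;
        if (size > 0)
        {
            size_t s = bytes.size();
            bytes.resize(s + size);
            std::memcpy(&bytes[s], inBuffer, size);
        }
        return inItems;
    }
};

// Usage: call sink.onData(chunk, 1, chunkLen) for each incoming chunk; sink.bytes holds the result.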
Example #3
		bool loadOggSample(OggVorbis_File &oggFile, QuickVec<unsigned char> &outBuffer, int *channels, int *bitsPerSample, int* outSampleRate)
		{
			// 0 for Little-Endian, 1 for Big-Endian
			#ifdef HXCPP_BIG_ENDIAN
			#define BUFFER_READ_TYPE 1
			#else
			#define BUFFER_READ_TYPE 0
			#endif
			
			int bitStream;
			long bytes = 1;
			int totalBytes = 0;
			
			#define BUFFER_SIZE 32768
			
			//Get the file information
			//vorbis data
			vorbis_info *pInfo = ov_info(&oggFile, -1);            
			//Make sure this is a valid file
			if (pInfo == NULL)
			{
				LOG_SOUND("FAILED TO READ OGG SOUND INFO, IS THIS EVEN AN OGG FILE?\n");
				return false;
			}
			
			//The number of channels
			*channels = pInfo->channels;
			//default to 16? todo 
			*bitsPerSample = 16;
			//Return the same rate as well
			*outSampleRate = pInfo->rate;
			
			// Seem to need four times the read PCM total
			outBuffer.resize(ov_pcm_total(&oggFile, -1)*4);
			
			while (bytes > 0)
			{
				if (outBuffer.size() < totalBytes + BUFFER_SIZE)
				{
					outBuffer.resize(totalBytes + BUFFER_SIZE);
				}
				// Read up to a buffer's worth of decoded sound data
				bytes = ov_read(&oggFile, (char*)outBuffer.begin() + totalBytes, BUFFER_SIZE, BUFFER_READ_TYPE, 2, 1, &bitStream);
				totalBytes += bytes;
			}
			
			outBuffer.resize(totalBytes);
			ov_clear(&oggFile);
			
			#undef BUFFER_SIZE
			#undef BUFFER_READ_TYPE
			
			return true;
		}
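loadOggSample expects an OggVorbis_File that has already been opened and, on the success path, calls ov_clear on it before returning. A hedged usage sketch built on libvorbisfile's ov_fopen; QuickVec and the helper itself come from the surrounding codebase, and the wrapper name decodeOggToPcm is made up:

// Assumes <vorbis/vorbisfile.h> plus the project headers declaring QuickVec and loadOggSample.
static bool decodeOggToPcm(const char *path, QuickVec<unsigned char> &pcm,
                           int &channels, int &bitsPerSample, int &sampleRate)
{
    OggVorbis_File oggFile;
    if (ov_fopen(path, &oggFile) != 0)   // ov_fopen parses the Ogg headers; non-zero means failure
        return false;

    // On success the helper fills pcm with 16-bit interleaved samples and calls ov_clear itself.
    return loadOggSample(oggFile, pcm, &channels, &bitsPerSample, &sampleRate);
}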
Example #4
    value snow_assets_audio_load_info_wav( value _id, value _do_read, value _bytes, value _byteOffset, value _byteLength ) {

        bool from_bytes = !val_is_null(_bytes);
        bool do_read = val_bool(_do_read);
        std::string _asset_id(val_string(_id));

            //the destination for the read, if any
        QuickVec<unsigned char> buffer;

            //the source information for the wav file
        snow::assets::audio::WAV_file_source* wav_source = new snow::assets::audio::WAV_file_source();
        wav_source->source_name = _asset_id;

        if(!from_bytes) {
            wav_source->file_source = snow::io::iosrc_from_file(_asset_id.c_str(), "rb");
        } else {
            int byteOffset = val_int(_byteOffset);
            int byteLength = val_int(_byteLength);
            const unsigned char* bytes = snow::bytes_from_hx(_bytes);
            wav_source->file_source = snow::io::iosrc_from_mem( (void*)(bytes + byteOffset), byteLength );
        }

        bool success = snow::assets::audio::load_info_wav( buffer, _asset_id.c_str(), wav_source, do_read );

        if(!success) {
            if(wav_source) { delete wav_source; wav_source = NULL; }
            return alloc_null();
        } //!success

        value data = snow::bytes_to_hx( &buffer[0], buffer.size() );

        value _object = alloc_empty_object();

            alloc_field( _object, id_id, _id );
            alloc_field( _object, id_format, alloc_int(2) ); //2 here is wav

            alloc_field( _object, id_handle, snow::to_hx<snow::assets::audio::WAV_file_source>( wav_source ) );

            value _dataobject = alloc_empty_object();

                alloc_field( _dataobject, id_channels, alloc_int(wav_source->channels) );
                alloc_field( _dataobject, id_rate, alloc_int(wav_source->rate) );
                alloc_field( _dataobject, id_bitrate, alloc_int(wav_source->bitrate) );
                alloc_field( _dataobject, id_bits_per_sample, alloc_int(wav_source->bits_per_sample) );
                alloc_field( _dataobject, id_bytes, data );
                alloc_field( _dataobject, id_length, alloc_int(wav_source->length) );
                alloc_field( _dataobject, id_length_pcm, alloc_int(wav_source->length_pcm) );

            alloc_field( _object, id_data, _dataobject );

        return _object;

    } DEFINE_PRIM(snow_assets_audio_load_info_wav, 5);
Example #5
 void SetTransform(const Transform &inTrans)
 {
    int points = mCount;
    if (points!=mTransformed.size() || inTrans!=mTransform)
    {
       mTransform = inTrans;
       mTransformed.resize(points);
       UserPoint *src= (UserPoint *)&mData[ mData0 ];
       for(int i=0;i<points;i++)
       {
          mTransformed[i] = mTransform.Apply(src[i].x,src[i].y);
       }
    }
 }
Example #6
   void GetExtent(CachedExtent &ioCache)
   {
      SetTransform(ioCache.mTransform);

      for(int i=0;i<mTransformed.size();i++)
         ioCache.mExtent.Add(mTransformed[i]);
   }
Example #7
File: JPEG.cpp Project: pshtif/lime
		void CopyBuffer () {
			
			mOutput.append (mTmpBuf, BUF_SIZE);
			pub.next_output_byte = mTmpBuf;
			pub.free_in_buffer = BUF_SIZE;
			
		}
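CopyBuffer is one piece of a libjpeg destination manager that encodes into memory: pub is the jpeg_destination_mgr, mTmpBuf is a fixed BUF_SIZE staging buffer handed to libjpeg, and mOutput accumulates the finished JPEG stream (TermBuffer in Example #19 flushes the final partial chunk). A minimal stand-alone sketch of that pattern, with std::vector as the output buffer and made-up names such as MemDest:

#include <cstdio>      // jpeglib.h expects stdio/stddef declarations to exist first
#include <vector>
#include <jpeglib.h>

static const size_t BUF_SIZE = 4096;

// Destination manager that collects compressed JPEG bytes in a std::vector.
struct MemDest
{
    jpeg_destination_mgr pub;            // must be the first member so cinfo->dest can be cast back
    std::vector<unsigned char> output;   // accumulated JPEG stream
    JOCTET tmp[BUF_SIZE];                // fixed staging buffer given to libjpeg
};

static void init_destination(j_compress_ptr cinfo)
{
    MemDest *dest = (MemDest *)cinfo->dest;
    dest->pub.next_output_byte = dest->tmp;
    dest->pub.free_in_buffer = BUF_SIZE;
}

// Called when tmp is full: copy it out and hand libjpeg a fresh buffer (cf. CopyBuffer above).
static boolean empty_output_buffer(j_compress_ptr cinfo)
{
    MemDest *dest = (MemDest *)cinfo->dest;
    dest->output.insert(dest->output.end(), dest->tmp, dest->tmp + BUF_SIZE);
    dest->pub.next_output_byte = dest->tmp;
    dest->pub.free_in_buffer = BUF_SIZE;
    return TRUE;
}

// Called once at the end of compression: flush the remaining bytes (cf. TermBuffer in Example #19).
static void term_destination(j_compress_ptr cinfo)
{
    MemDest *dest = (MemDest *)cinfo->dest;
    dest->output.insert(dest->output.end(), dest->tmp,
                        dest->tmp + (BUF_SIZE - dest->pub.free_in_buffer));
}

// Wire-up (not shown): assign the three callbacks to dest->pub, then set cinfo.dest = &dest->pub
// before jpeg_start_compress.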
Example #8
void AlphaMask::Dispose()
{
   #ifdef RECYCLE_ALPHA_MASK
   sMaskCache.push_back(this);
   #else
   delete this;
   #endif
}
Example #9
	virtual ByteArray releaseData()
	{
		if (mBytes.size())
		{
         return ByteArray(mBytes);
		}
		return ByteArray();
	}
Example #10
                //bpp == the resulting bits per pixel
                //bpp_source == the source image bits per pixel
                //req_bpp == use this instead of the source
            bool load_info(
                QuickVec<unsigned char> &out_buffer,
                const char* _id,
                int* w, int* h, int* bpp, int* bpp_source, int req_bpp = 4
            ) {

                //get an io file pointer to the image
                snow::io::iosrc* src = snow::io::iosrc_from_file(_id, "rb");

                if(!src) {
                    snow::log(1, "/ snow / cannot open image file from %s", _id);
                    return false;
                }

                    //always use callbacks because we use snow abstracted IO
                stbi_io_callbacks stbi_snow_callbacks = {
                   snow_stbi_read,
                   snow_stbi_skip,
                   snow_stbi_eof
                };

                unsigned char *data = stbi_load_from_callbacks(&stbi_snow_callbacks, src, w, h, bpp_source, req_bpp);

                    //we are done with the src
                snow::io::close(src);

                snow::log(2, "/ snow / image / w:%d h:%d source bpp:%d bpp:%d\n", *w, *h, *bpp_source, req_bpp);

                if(data != NULL) {

                    int _w = *w;
                    int _h = *h;
                    int _bpp = *bpp_source;

                        //if a requested bpp was given, override it
                    if(req_bpp != 0) {
                        _bpp = req_bpp;
                    }

                        //actual used bpp
                    *bpp = _bpp;
                        //work out the total length of the buffer
                    unsigned int length = _w * _h * _bpp;
                        //store it
                    out_buffer.Set(data, length);
                        //clean up used memory
                    stbi_image_free(data);

                } else { //data != NULL

                    snow::log(1, "/ snow / image unable to be loaded by snow: %s reason: %s", _id, stbi_failure_reason());
                    return false;

                }

                return true;

            } //load_info
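stbi_load_from_callbacks pulls the file through three user callbacks (read, skip, eof); that is how snow routes stb_image through its abstracted iosrc IO, but snow_stbi_read/skip/eof themselves are not part of this excerpt. A sketch of what such callbacks can look like over a plain in-memory cursor; MemCursor is a made-up stand-in, not the actual snow::io::iosrc:

#include <cstring>
#include "stb_image.h"

// Simple read cursor over a byte range.
struct MemCursor
{
    const unsigned char *data;
    int size;
    int pos;
};

// stbi_io_callbacks::read — copy up to `size` bytes, return how many were actually read.
static int mem_read(void *user, char *out, int size)
{
    MemCursor *c = (MemCursor *)user;
    int n = c->size - c->pos;
    if (n > size) n = size;
    std::memcpy(out, c->data + c->pos, n);
    c->pos += n;
    return n;
}

// stbi_io_callbacks::skip — advance (or, for negative n, rewind) the cursor.
static void mem_skip(void *user, int n)
{
    MemCursor *c = (MemCursor *)user;
    c->pos += n;
}

// stbi_io_callbacks::eof — non-zero once no bytes remain.
static int mem_eof(void *user)
{
    MemCursor *c = (MemCursor *)user;
    return c->pos >= c->size;
}

// Usage mirrors the example above:
//   stbi_io_callbacks cb = { mem_read, mem_skip, mem_eof };
//   unsigned char *pixels = stbi_load_from_callbacks(&cb, &cursor, &w, &h, &bpp_source, req_bpp);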
Example #11
File: Graphics.cpp Project: madrazo/nme
void Graphics::drawPath(const QuickVec<uint8> &inCommands, const QuickVec<float> &inData,
           WindingRule inWinding )
{
   int n = inCommands.size();
   if (n==0 || inData.size()<2)
      return;

   const UserPoint *point = (UserPoint *)&inData[0];
   const UserPoint *last =  point + inData.size()/2;

   if ( (mFillJob.mFill && mFillJob.mCommand0==mPathData->commands.size()) ||
        (mLineJob.mStroke && mLineJob.mCommand0==mPathData->commands.size()) )
     mPathData->initPosition(mCursor);

   for(int i=0;i<n && point<last;i++)
   {
      switch(inCommands[i])
      {
         case pcWideMoveTo:
            point++;
            if (point==last) break;
         case pcMoveTo:
            mPathData->moveTo(point->x,point->y);
            mCursor = *point++;
            break;

         case pcWideLineTo:
            point++;
            if (point==last) break;
         case pcLineTo:
            mPathData->lineTo(point->x,point->y);
            mCursor = *point++;
            break;

         case pcCurveTo:
            if (point+1==last) break;
            mPathData->curveTo(point->x,point->y,point[1].x,point[1].y);
            mCursor = point[1];
            point += 2;
      }
   }
   OnChanged();
}
Example #12
File: Audio.cpp Project: AlexYates/nme
bool loadOggSample(OggVorbis_File &oggFile, QuickVec<unsigned char> &outBuffer, int *channels, int *bitsPerSample, int* outSampleRate)
{
    // 0 for Little-Endian, 1 for Big-Endian
    int endian = 0;
    int bitStream;
    long bytes = 1;

#define BUFFER_SIZE 32768
    char array[BUFFER_SIZE];

    //Get the file information
    //vorbis data
    vorbis_info *pInfo = ov_info(&oggFile, -1);
    //Make sure this is a valid file
    if (pInfo == NULL)
    {
        LOG_SOUND("FAILED TO READ OGG SOUND INFO, IS THIS EVEN AN OGG FILE?\n");
        return false;
    }

    //The number of channels
    *channels = pInfo->channels;
    //default to 16? todo
    *bitsPerSample = 16;
    //Return the same rate as well
    *outSampleRate = pInfo->rate;

    while (bytes > 0)
    {
        // Read up to a buffer's worth of decoded sound data
        bytes = ov_read(&oggFile, array, BUFFER_SIZE, endian, 2, 1, &bitStream);
        // Append to end of buffer
        outBuffer.InsertAt(outBuffer.size(), (unsigned char*)array, bytes);
    }

    ov_clear(&oggFile);

#undef BUFFER_SIZE

    return true;
}
Example #13
                //bpp == the resulting bits per pixel
                //bpp_source == the source image bits per pixel
                //req_bpp == use this instead of the source
            bool info_from_bytes(
                QuickVec<unsigned char> &out_buffer,
                const unsigned char* bytes, int byteOffset, int byteLength,
                const char* _id, int *w, int *h, int* bpp, int* bpp_source, int req_bpp = 4
            ) {

                    //get an io source over the provided bytes
                snow::io::iosrc* src = snow::io::iosrc_from_mem( (void*)(bytes + byteOffset), byteLength );

                if(!src) {
                    snow::log(1, "/ snow / cannot open bytes from %s", _id);
                    return false;
                }

                    //always use callbacks because we use snow abstracted IO
                stbi_io_callbacks stbi_snow_callbacks = {
                   snow_stbi_read,
                   snow_stbi_skip,
                   snow_stbi_eof
                };

                unsigned char *data = stbi_load_from_callbacks(&stbi_snow_callbacks, src, w, h, bpp_source, req_bpp);

                    //we are done with the src
                snow::io::close(src);

                snow::log(2, "/ snow / image / w:%d h:%d source bpp:%d bpp:%d\n", *w, *h, *bpp_source, req_bpp);

                if(data != NULL) {

                    int _w = *w;
                    int _h = *h;
                    int _bpp = *bpp_source;

                        //if a requested bpp was given, override it
                    if(req_bpp != 0) {
                        _bpp = req_bpp;
                    }

                        //actual used bpp
                    *bpp = _bpp;
                        //work out the total length of the output buffer
                    unsigned int length = _w * _h * _bpp;
                        //store it
                    out_buffer.Set(data, length);
                        //clean up used memory
                    stbi_image_free(data);

                } //data != NULL

                return true;

            } //info_from_bytes
Example #14
AlphaMask *AlphaMask::Create(const Rect &inRect,const Transform &inTrans)
{
   #ifdef RECYCLE_ALPHA_MASK
   int need = inRect.h+1;
   for(int i=0;i<sMaskCache.size();i++)
   {
      AlphaMask *m = sMaskCache[i];
      if (m->mLineStarts.mAlloc >=need && m->mLineStarts.size() < need+10 )
      {
         sMaskCache[i] = sMaskCache[sMaskCache.size()-1];
         sMaskCache.resize(sMaskCache.size()-1);
         m->mRect = inRect;
         m->mLineStarts.resize(need);
         m->mMatrix = *inTrans.mMatrix;
         m->mScale9 = *inTrans.mScale9;
         m->mAAFactor = inTrans.mAAFactor;
         return m;
      }
   }
   #endif
   return new AlphaMask(inRect,inTrans);
}
Example #15
    value snow_assets_image_info_from_bytes( value _id, value _bytes, value _byteOffset, value _byteLength, value _req_bpp ) {

        QuickVec<unsigned char> buffer;

        int w = 0, h = 0, bpp = 0, bpp_source = 0;
        int req_bpp = val_int(_req_bpp);
        int byteOffset = val_int(_byteOffset);
        int byteLength = val_int(_byteLength);

        bool success =
            snow::assets::image::info_from_bytes(
                buffer,
                snow::bytes_from_hx(_bytes),
                byteOffset,
                byteLength,
                val_string(_id),
                &w, &h, &bpp, &bpp_source,
                req_bpp
            );

        if(!success) {
            return alloc_null();
        }

        value result_bytes = snow::bytes_to_hx( &buffer[0], buffer.size() );

        value _object = alloc_empty_object();

            alloc_field( _object, id_id, _id );
            alloc_field( _object, id_width, alloc_int(w) );
            alloc_field( _object, id_height, alloc_int(h) );
            alloc_field( _object, id_bpp, alloc_int(bpp) );
            alloc_field( _object, id_bpp_source, alloc_int(bpp_source) );
            alloc_field( _object, id_data, result_bytes );

        return _object;

    } DEFINE_PRIM(snow_assets_image_info_from_bytes, 5);
Example #16
File: Bytes.cpp Project: blue112/lime
	void Bytes::Set (const QuickVec<unsigned char> data) {
		
		int size = data.size ();
		
		if (size > 0) {
			
			Resize (size);
			memcpy (_data, &data[0], _length);
			
		} else {
			
			_data = 0;
			_length = 0;
			
		}
		
	}
Example #17
   bool Hits(const RenderState &inState)
   {
      UserPoint screen(inState.mClipRect.x, inState.mClipRect.y);

      Extent2DF extent;
      CachedExtentRenderer::GetExtent(inState.mTransform,extent,true);
      if (!extent.Contains(screen))
          return false;

      UserPoint hit_test = inState.mTransform.mMatrix->ApplyInverse(screen);
      if (inState.mTransform.mScale9->Active())
      {
         hit_test.x = inState.mTransform.mScale9->InvTransX(hit_test.x);
         hit_test.y = inState.mTransform.mScale9->InvTransY(hit_test.y);
      }

      for(int i=0;i<mTransformed.size();i++)
      {
         const UserPoint &point = mTransformed[i];
         if ( fabs(point.x-screen.x) < 1 && fabs(point.y-screen.y) < 1 )
            return true;
      }
      return false;
   }
Example #18
File: SurfaceIO.cpp Project: Gemioli/lime
void user_write_data(png_structp png_ptr, png_bytep data, png_size_t length)
{
    QuickVec<unsigned char> *buffer = (QuickVec<unsigned char> *)png_get_io_ptr(png_ptr);
    buffer->append((unsigned char *)data,(int)length);
} 
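user_write_data matches libpng's png_rw_ptr signature, and the QuickVec it appends to is whatever pointer was registered as the io_ptr. A short registration sketch, assuming a png_structp already created with png_create_write_struct; the flush callback can be left NULL for a memory target:

QuickVec<unsigned char> pngBytes;

// Route libpng's output into the buffer via the callback above; subsequent png_write_info /
// png_write_image / png_write_end calls append their bytes to pngBytes.
png_set_write_fn(png_ptr, (png_voidp)&pngBytes, user_write_data, NULL);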
Example #19
File: SurfaceIO.cpp Project: Gemioli/lime
 void TermBuffer()
 {
    mOutput.append( mTmpBuf, BUF_SIZE - pub.free_in_buffer );
 }
Example #20
void ConvertOutlineToTriangles(Vertices &ioOutline,const QuickVec<int> &inSubPolys)
{
   // Order polygons ...
   int subs = inSubPolys.size();
   if (subs<1)
      return;

   QuickVec<SubInfo> subInfo;
   QuickVec<EdgePoint> edges(ioOutline.size());
   int index = 0;
   int groupId = 0;

   for(int sub=0;sub<subs;sub++)
   {
      SubInfo info;

      info.p0 = sub>0?inSubPolys[sub-1]:0;
      info.size = inSubPolys[sub] - info.p0;
      if (ioOutline[info.p0] == ioOutline[info.p0+info.size-1])
         info.size--;

      if (info.size>2)
      {
         UserPoint *p = &ioOutline[info.p0];
         double area = 0.0;
         for(int i=2;i<info.size;i++)
         {
            UserPoint v_prev = p[i-1] - p[0];
            UserPoint v_next = p[i] - p[0];
            area += v_prev.Cross(v_next);
         }
         bool reverse = area < 0;
         int  parent = -1;

         for(int prev=subInfo.size()-1; prev>=0 && parent==-1; prev--)
         {
            if (subInfo[prev].contains(p[0]))
            {
               int prev_p0 = subInfo[prev].p0;
               int prev_size = subInfo[prev].size;
               int inside = PIP_MAYBE;
               for(int test_point = 0; test_point<info.size && inside==PIP_MAYBE; test_point++)
               {
                  inside =  PointInPolygon( p[test_point], &ioOutline[prev_p0], prev_size);
                  if (inside==PIP_YES)
                     parent = prev;
               }
            }
         }

         if (parent==-1 || subInfo[parent].is_internal )
         {
            info.group = groupId++;
            info.is_internal = false;
         }
         else
         {
            info.group = subInfo[parent].group;
            info.is_internal = true;
         }

         info.first = &edges[index];
         AddSubPoly(info.first,p,info.size,reverse!=info.is_internal);
         if (sub<subs-1)
            info.calcExtent();
         index += info.size;

         subInfo.push_back(info);
      }
   }

   Vertices triangles;
   for(int group=0;group<groupId;group++)
   {
      int first = -1;
      int size = 0;
      for(int sub=0;sub<subInfo.size();sub++)
      {
         SubInfo &info = subInfo[sub];
         if (info.group==group)
         {
            if (first<0)
            {
               first = sub;
               size = info.size;
            }
            else
            {
               LinkSubPolys(subInfo[first].first,info.first, info.link);
               size += info.size + 2;
            }
         }
      }
      ConvertOutlineToTriangles(subInfo[first].first, size,triangles);
   }

   ioOutline.swap(triangles);
}
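The area accumulation near the top of this function is the standard signed-area (shoelace) test, computed as a triangle fan from p[0]: the sign of the total gives the winding direction of each sub-polygon and drives the reverse flag passed to AddSubPoly. The same test in isolation, with a plain Point struct standing in for UserPoint:

#include <cstddef>

struct Point { double x, y; };

// 2D cross product of the vectors (b - a) and (c - a).
static double cross(const Point &a, const Point &b, const Point &c)
{
    return (b.x - a.x) * (c.y - a.y) - (b.y - a.y) * (c.x - a.x);
}

// Twice the signed area of p[0..n-1], accumulated as a fan from p[0],
// exactly like the area loop in ConvertOutlineToTriangles above.
static double signedArea2(const Point *p, size_t n)
{
    double area = 0.0;
    for (size_t i = 2; i < n; i++)
        area += cross(p[0], p[i - 1], p[i]);
    return area;
}

// bool reverse = signedArea2(p, n) < 0;   // mirrors `bool reverse = area < 0` above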
Example #21
 ~TriangleRender()
 {
    for(int i=0;i<mAlphaMasks.size();i++)
       if (mAlphaMasks[i])
         mAlphaMasks[i]->Dispose();
 }
Example #22
namespace nme
{

QuickVec<AlphaMask *> sMaskCache;

//#define RECYCLE_ALPHA_MASK

AlphaMask *AlphaMask::Create(const Rect &inRect,const Transform &inTrans)
{
   #ifdef RECYCLE_ALPHA_MASK
   int need = inRect.h+1;
   for(int i=0;i<sMaskCache.size();i++)
   {
      AlphaMask *m = sMaskCache[i];
      if (m->mLineStarts.mAlloc >=need && m->mLineStarts.size() < need+10 )
      {
         sMaskCache[i] = sMaskCache[sMaskCache.size()-1];
         sMaskCache.resize(sMaskCache.size()-1);
         m->mRect = inRect;
         m->mLineStarts.resize(need);
         m->mMatrix = *inTrans.mMatrix;
         m->mScale9 = *inTrans.mScale9;
         m->mAAFactor = inTrans.mAAFactor;
         return m;
      }
   }
   #endif
   return new AlphaMask(inRect,inTrans);
}

void AlphaMask::Dispose()
{
   #ifdef RECYCLE_ALPHA_MASK
   sMaskCache.push_back(this);
   #else
   delete this;
   #endif
}

void AlphaMask::ClearCache()
{
   #ifdef RECYCLE_ALPHA_MASK
   for(int i=0;i<sMaskCache.size();i++)
      delete sMaskCache[i];
   sMaskCache.resize(0);
   #endif
}

bool AlphaMask::Compatible(const Transform &inTransform,
                           const Rect &inExtent, const Rect &inVisiblePixels,
                           int &outTX, int &outTY )
{
   int tx,ty;
   if  ( (!mMatrix.IsIntTranslation(*inTransform.mMatrix,tx,ty)) || (mScale9!=*inTransform.mScale9) )
      return false;

   if (mAAFactor!=inTransform.mAAFactor)
      return false;

   // Translate our cached pixels to this new position ...
   Rect translated = mRect.Translated(tx,ty);
   if (translated.Contains(inVisiblePixels))
   {
      outTX = tx;
      outTY = ty;
      return true;
   }

   return false;
}

void AlphaMask::RenderBitmap(int inTX,int inTY,
         const RenderTarget &inTarget,const RenderState &inState)
{
   if (mLineStarts.size()<2)
      return;

   Rect clip = inState.mClipRect;
   int y = mRect.y + inTY;
   const int *start = &mLineStarts[0] - y;

   int y1 = mRect.y1() + inTY;
   clip.ClipY(y,y1);

   for(; y<y1; y++)
   {
      int sy = y - inTY;
      const AlphaRun *end = &mAlphaRuns[ start[y+1] ];
      const AlphaRun *run = &mAlphaRuns[ start[y] ];
      if (run!=end)
      {
         Uint8 *dest0 = inTarget.Row(y);
         while(run<end && run->mX1 + inTX<=clip.x)
            run++;

         while(run<end)
         {
            int x0 = run->mX0 + inTX;
            if (x0 >= clip.x1())
               break;
            int x1 = run->mX1 + inTX;
            clip.ClipX(x0,x1);

            Uint8 *dest = dest0 + x0;
            int alpha = run->mAlpha;

            if (alpha>0)
            {
               if (alpha>=255)
                  while(x0++<x1)
                     *dest++ = 255;
               else
                  while(x0++<x1)
                     QBlendAlpha( *dest++, alpha );
            }
            ++run;
         }
      }
   }

}

}
Example #23
File: Audio.cpp Project: AlexYates/nme
bool loadWavSampleFromBytes(const float *inData, int len, QuickVec<unsigned char> &outBuffer, int *channels, int *bitsPerSample, int* outSampleRate)
{
    const char* start = (const char*)inData;
    const char* end = start + len;
    const char* ptr = start;
    WAVE_Format wave_format;
    RIFF_Header riff_header;
    WAVE_Data wave_data;
    unsigned char* data;

    // Read in the first chunk into the struct
    memcpy(&riff_header, ptr, sizeof(RIFF_Header));
    ptr += sizeof(RIFF_Header);

    //check for RIFF and WAVE tag in memory
    if ((riff_header.chunkID[0] != 'R'  ||
            riff_header.chunkID[1] != 'I'  ||
            riff_header.chunkID[2] != 'F'  ||
            riff_header.chunkID[3] != 'F') ||
            (riff_header.format[0] != 'W'  ||
             riff_header.format[1] != 'A'  ||
             riff_header.format[2] != 'V'  ||
             riff_header.format[3] != 'E'))
    {
        LOG_SOUND("Invalid RIFF or WAVE Header!\n");
        return false;
    }

    //Read in the 2nd chunk for the wave info
    ptr = find_chunk(ptr, end, "fmt ");
    if (!ptr) {
        return false;
    }
    readStruct(wave_format, ptr);

    //check for fmt tag in memory
    if (wave_format.subChunkID[0] != 'f' ||
            wave_format.subChunkID[1] != 'm' ||
            wave_format.subChunkID[2] != 't' ||
            wave_format.subChunkID[3] != ' ')
    {
        LOG_SOUND("Invalid Wave Format!\n");
        return false;
    }

    ptr = find_chunk(ptr, end, "data");
    if (!ptr) {
        return false;
    }

    const char* base = readStruct(wave_data, ptr);

    //check for data tag in memory
    if (wave_data.subChunkID[0] != 'd' ||
            wave_data.subChunkID[1] != 'a' ||
            wave_data.subChunkID[2] != 't' ||
            wave_data.subChunkID[3] != 'a')
    {
        LOG_SOUND("Invalid Wav Data Header!\n");
        return false;
    }

    //Allocate memory for data
    //data = new unsigned char[wave_data.subChunk2Size];

    // Read in the sound data into the soundData variable
    size_t size = wave_data.subChunkSize;
    if (size > (end - base)) {
        return false;
    }

    /*mlChannels = wave_format.numChannels;
    if (mlChannels == 2)
    {
    	if (wave_format.bitsPerSample == 8)
    	{
    		mFormat = AL_FORMAT_STEREO8;
    		mlSamples = size / 2;
    	}
    	else //if (wave_format.bitsPerSample == 16)
    	{
    		mlSamples = size / 4;
    		mFormat = AL_FORMAT_STEREO16;
    	}
    } else //if (mlChannels == 1)
    {
    	if (wave_format.bitsPerSample == 8)
    	{
    		mlSamples = size;
    		mFormat = AL_FORMAT_MONO8;
    	}
    	else //if (wave_format.bitsPerSample == 16)
    	{
    		mlSamples = size / 2;
    		mFormat = AL_FORMAT_MONO16;
    	}
    }
    mlFrequency = wave_format.sampleRate;
    mfTotalTime = float(mlSamples) / float(mlFrequency);*/

    //Store in the outbuffer
    outBuffer.Set((unsigned char*)base, size);

    //Now we set the variables that we passed in with the
    //data from the structs
    *outSampleRate = (int)wave_format.sampleRate;

    //The format is worked out by looking at the number of
    //channels and the bits per sample.
    *channels = wave_format.numChannels;
    *bitsPerSample = wave_format.bitsPerSample;

    //clean up and return true if successful
    //fclose(f);
    //delete[] data;

    return true;
}
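RIFF_Header, WAVE_Format, WAVE_Data, find_chunk and readStruct are project helpers that are not part of this excerpt; readStruct presumably memcpy's the struct out of the byte stream and returns the advanced pointer. As a rough sketch, the structs mirror the standard WAV layout and find_chunk walks the RIFF chunk list. The field names and the find_chunk body below are assumptions for illustration, not the actual nme definitions:

#include <cstring>

// Standard WAV/RIFF layout the structs above correspond to (exact integer types are platform-dependent).
struct RIFF_Header              // "RIFF" <size> "WAVE"
{
    char chunkID[4];
    int  chunkSize;
    char format[4];
};

struct WAVE_Format              // "fmt " chunk header + PCM format fields
{
    char  subChunkID[4];
    int   subChunkSize;
    short audioFormat;
    short numChannels;
    int   sampleRate;
    int   byteRate;
    short blockAlign;
    short bitsPerSample;
};

struct WAVE_Data                // "data" chunk header; the samples follow immediately after it
{
    char subChunkID[4];
    int  subChunkSize;
};

// Plausible find_chunk: walk "<4-byte id><4-byte little-endian size><payload>" records until id matches.
// Returns a pointer to the matching chunk header, or NULL if the range is exhausted.
static const char *find_chunk(const char *start, const char *end, const char *id)
{
    const char *ptr = start;
    while (ptr + 8 <= end)
    {
        if (std::memcmp(ptr, id, 4) == 0)
            return ptr;
        int size;
        std::memcpy(&size, ptr + 4, 4);
        if (size < 0)
            return NULL;
        ptr += 8 + size + (size & 1);   // RIFF chunks are padded to an even length
    }
    return NULL;
}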
Example #24
   bool Render( const RenderTarget &inTarget, const RenderState &inState )
   {
      Extent2DF extent;
      CachedExtentRenderer::GetExtent(inState.mTransform,extent,true);

      if (!extent.Valid())
         return true;

      // Get bounding pixel rect
      Rect rect = inState.mTransform.GetTargetRect(extent);

      // Intersect with clip rect ...
      Rect visible_pixels = rect.Intersect(inState.mClipRect);
      int x0 = visible_pixels.x;
      int y0 = visible_pixels.y;
      int x1 = visible_pixels.x1();
      int y1 = visible_pixels.y1();


      if (!mHasColours)
      {
         int val =  mCol.ival;
         // 100% alpha...
         if ( ( (val & 0xff000000) == 0xff000000 ) || (inTarget.mPixelFormat & pfHasAlpha) )
         {
            for(int i=0;i<mTransformed.size();i++)
            {
                const UserPoint &point = mTransformed[i];
                int tx = point.x;
                if (x0<=tx && tx<x1)
                {
                   int ty = point.y;
                   if (y0<=ty && ty<y1)
                      ((int *)inTarget.Row(ty))[tx] = val;
                }
             }
         }
         else
         {
            ARGB argb = mCol;

            for(int i=0;i<mTransformed.size();i++)
            {
               const UserPoint &point = mTransformed[i];
               int tx = point.x;
               if (x0<=tx && tx<x1)
               {
                  int ty = point.y;
                  if (y0<=ty && ty<y1)
                     ((ARGB *)inTarget.Row(ty))[tx].QBlendA(argb);
               }
            }
         }
      }
      else
      {
         ARGB *argb = (ARGB *) & mData[mData0 + mTransformed.size()*2];
         if (inTarget.mPixelFormat & pfHasAlpha)
            for(int i=0;i<mTransformed.size();i++)
            {
               const UserPoint &point = mTransformed[i];
               int tx = point.x;
               if (x0<=tx && tx<x1)
               {
                  int ty = point.y;
                  if (y0<=ty && ty<y1)
                     ((ARGB *)inTarget.Row(ty))[tx].QBlendA(argb[i]);
               }
            }
         else
            for(int i=0;i<mTransformed.size();i++)
            {
               const UserPoint &point = mTransformed[i];
               int tx = point.x;
               if (x0<=tx && tx<x1)
               {
                  int ty = point.y;
                  if (y0<=ty && ty<y1)
                     ((ARGB *)inTarget.Row(ty))[tx].QBlend( argb[i] );
               }
            }
      }
      
      return true;
   }
Example #25
   OpenALSound::OpenALSound(float *inData, int len)
   {
      IncRef();
      mBufferID = 0;
      mIsStream = false;
      
      QuickVec<uint8> buffer;
      int _channels;
      int _bitsPerSample;
      ALenum  format;
      ALsizei freq;
      bool ok = false; 
      
      //Determine the file format before we try anything
      AudioFormat type = Audio::determineFormatFromBytes(inData, len);
      
      switch(type) {
         case eAF_ogg:
            ok = Audio::loadOggSampleFromBytes(inData, len, buffer, &_channels, &_bitsPerSample, &freq );
         break;
         case eAF_wav:
            ok = Audio::loadWavSampleFromBytes(inData, len, buffer, &_channels, &_bitsPerSample, &freq );
         break;
         default:
            LOG_SOUND("Error opening sound file, unsupported type.\n");
      }
      
      //Work out the format from the data
      if (_channels == 1) {
         if (_bitsPerSample == 8 ) {
            format = AL_FORMAT_MONO8;
         } else if (_bitsPerSample == 16) {
            format = (int)AL_FORMAT_MONO16;
         }
      } else if (_channels == 2) {
         if (_bitsPerSample == 8 ) {
            format = (int)AL_FORMAT_STEREO8;
         } else if (_bitsPerSample == 16) {
            format = (int)AL_FORMAT_STEREO16;
         }
      } //channels = 2
       
      
      if (!ok) {
         LOG_SOUND("Error opening sound data\n");
         mError = "Error opening sound data";
      } else if (alGetError() != AL_NO_ERROR) {
         LOG_SOUND("Error after opening sound data\n");
         mError = "Error after opening sound data";  
      } else {
            // grab a buffer ID from openAL
         alGenBuffers(1, &mBufferID);
         
            // load the awaiting data blob into the openAL buffer.
         alBufferData(mBufferID,format,&buffer[0],buffer.size(),freq); 

            // once we have all our information loaded, get some extra flags
         alGetBufferi(mBufferID, AL_SIZE, &bufferSize);
         alGetBufferi(mBufferID, AL_FREQUENCY, &frequency);
         alGetBufferi(mBufferID, AL_CHANNELS, &channels);    
         alGetBufferi(mBufferID, AL_BITS, &bitsPerSample); 
         
      }
   }
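The channels/bitsPerSample ladder above (and the identical one in Example #26 below) maps the decoded PCM layout onto one of the four core OpenAL buffer formats; note that format is left uninitialized if an unexpected combination comes back, so a fallback is worth having. A small hedged helper expressing the same mapping (the function name formatFor is made up; the OpenAL header path varies by platform):

#include <AL/al.h>

// Map channel count and sample width onto the core OpenAL formats used above.
// Returns AL_NONE for combinations these examples do not handle (e.g. 24-bit or more than 2 channels).
static ALenum formatFor(int channels, int bitsPerSample)
{
    if (channels == 1)
        return bitsPerSample == 8  ? AL_FORMAT_MONO8
             : bitsPerSample == 16 ? AL_FORMAT_MONO16 : AL_NONE;
    if (channels == 2)
        return bitsPerSample == 8  ? AL_FORMAT_STEREO8
             : bitsPerSample == 16 ? AL_FORMAT_STEREO16 : AL_NONE;
    return AL_NONE;
}

// Usage, mirroring the constructor above:
//   ALenum format = formatFor(_channels, _bitsPerSample);
//   if (format != AL_NONE)
//      alBufferData(mBufferID, format, &buffer[0], buffer.size(), freq);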
Example #26
   OpenALSound::OpenALSound(const std::string &inFilename, bool inForceMusic)
   {
      IncRef();
      mBufferID = 0;
      mIsStream = false;
      mTotalTime = -1;
      
      #ifdef HX_MACOS
      char fileURL[1024];
      GetBundleFilename(inFilename.c_str(),fileURL,1024);
      #else
      #ifdef IPHONE
      std::string asset = GetResourcePath() + gAssetBase + inFilename;
      const char *fileURL = asset.c_str();
      #else
      const char *fileURL = inFilename.c_str();
      #endif
      #endif
      
      if (!fileURL) {
         
         //LOG_SOUND("OpenALSound constructor() error in url");
         mError = "Error int url: " + inFilename;

      } else {

         QuickVec<uint8> buffer;
         int _channels;
         int _bitsPerSample;
         ALenum  format;
         ALsizei freq;
         bool ok = false; 

            //Determine the file format before we try anything
         AudioFormat type = Audio::determineFormatFromFile(std::string(fileURL));
         switch(type) {
            case eAF_ogg:
               if (inForceMusic)
               {
                  mIsStream = true;
                  mStreamPath = fileURL;
               }
               else
               {
                  ok = Audio::loadOggSampleFromFile( fileURL, buffer, &_channels, &_bitsPerSample, &freq );
               }
            break;
            case eAF_wav:
               ok = Audio::loadWavSampleFromFile( fileURL, buffer, &_channels, &_bitsPerSample, &freq );
            break;
            default:
               LOG_SOUND("Error opening sound file, unsupported type.\n");
         }
         
         if (mIsStream)
            return;
         
            //Work out the format from the data
         if (_channels == 1) {
            if (_bitsPerSample == 8 ) {
               format = AL_FORMAT_MONO8;
            } else if (_bitsPerSample == 16) {
               format = (int)AL_FORMAT_MONO16;
            }
         } else if (_channels == 2) {
            if (_bitsPerSample == 8 ) {
               format = (int)AL_FORMAT_STEREO8;
            } else if (_bitsPerSample == 16) {
               format = (int)AL_FORMAT_STEREO16;
            }
         } //channels = 2
          
         
         if (!ok) {
            LOG_SOUND("Error opening sound data\n");
            mError = "Error opening sound data";
         } else if (alGetError() != AL_NO_ERROR) {
            LOG_SOUND("Error after opening sound data\n");
            mError = "Error after opening sound data";  
         } else {
               // grab a buffer ID from openAL
            alGenBuffers(1, &mBufferID);
            
               // load the awaiting data blob into the openAL buffer.
            alBufferData(mBufferID,format,&buffer[0],buffer.size(),freq); 

               // once we have all our information loaded, get some extra flags
            alGetBufferi(mBufferID, AL_SIZE, &bufferSize);
            alGetBufferi(mBufferID, AL_FREQUENCY, &frequency);
            alGetBufferi(mBufferID, AL_CHANNELS, &channels);    
            alGetBufferi(mBufferID, AL_BITS, &bitsPerSample); 
            
         } //!ok
      }
   }
Example #27
File: Audio.cpp Project: AlexYates/nme
bool loadWavSampleFromFile(const char *inFileURL, QuickVec<unsigned char> &outBuffer, int *channels, int *bitsPerSample, int* outSampleRate)
{
    //http://www.dunsanyinteractive.com/blogs/oliver/?p=72

    //Local Declarations
    FILE* f = NULL;
    WAVE_Format wave_format;
    RIFF_Header riff_header;
    WAVE_Data wave_data;
    unsigned char* data;

#ifdef ANDROID
    FileInfo info = AndroidGetAssetFD(inFileURL);
    f = fdopen(info.fd, "rb");
    fseek(f, info.offset, 0);
#else
    f = fopen(inFileURL, "rb");
#endif

    if (!f)
    {
        LOG_SOUND("FAILED to read sound file, file pointer as null?\n");
        return false;
    }

    // Read in the first chunk into the struct
    fread(&riff_header, sizeof(RIFF_Header), 1, f);
    //check for RIFF and WAVE tag in memory
    if ((riff_header.chunkID[0] != 'R'  ||
            riff_header.chunkID[1] != 'I'  ||
            riff_header.chunkID[2] != 'F'  ||
            riff_header.chunkID[3] != 'F') ||
            (riff_header.format[0] != 'W'  ||
             riff_header.format[1] != 'A'  ||
             riff_header.format[2] != 'V'  ||
             riff_header.format[3] != 'E'))
    {
        LOG_SOUND("Invalid RIFF or WAVE Header!\n");
        return false;
    }

    //Read in the 2nd chunk for the wave info
    fread(&wave_format, sizeof(WAVE_Format), 1, f);

    //check for fmt tag in memory
    if (wave_format.subChunkID[0] != 'f' ||
            wave_format.subChunkID[1] != 'm' ||
            wave_format.subChunkID[2] != 't' ||
            wave_format.subChunkID[3] != ' ')
    {
        LOG_SOUND("Invalid Wave Format!\n");
        return false;
    }

    //check for extra parameters;
    if (wave_format.subChunkSize > 16)
    {
        fseek(f, sizeof(short), SEEK_CUR);
    }

    //Read in the last header chunk before the sound data
    fread(&wave_data, sizeof(WAVE_Data), 1, f);

    //check for data tag in memory
    if (wave_data.subChunkID[0] != 'd' ||
            wave_data.subChunkID[1] != 'a' ||
            wave_data.subChunkID[2] != 't' ||
            wave_data.subChunkID[3] != 'a')
    {
        LOG_SOUND("Invalid Wav Data Header!\n");
        return false;
    }

    //Allocate memory for data
    data = new unsigned char[wave_data.subChunkSize];

    // Read in the sound data into the soundData variable
    if (!fread(data, wave_data.subChunkSize, 1, f))
    {
        LOG_SOUND("error loading WAVE data into struct!\n");
        return false;
    }

    //Store in the outbuffer
    outBuffer.Set(data, wave_data.subChunkSize);

    //Now we set the variables that we passed in with the
    //data from the structs
    *outSampleRate = (int)wave_format.sampleRate;

    //The format is worked out by looking at the number of
    //channels and the bits per sample.
    *channels = wave_format.numChannels;
    *bitsPerSample = wave_format.bitsPerSample;

    //clean up and return true if successful
    fclose(f);
    delete[] data;

    return true;
}