Example #1
int WINAPI WinMain (
	__in HINSTANCE hInstance,
	__in_opt HINSTANCE hPrevInstance,
	__in LPSTR lpCmdLine,
	__in int nShowCmd
	)
{
	appPath = lpCmdLine;

#if 0
	DynamicMessageHelper file;
	if (file.Read("data\\TestAnimHandlers\\BinHandlers\\SpinXYZ,ffd",true))
	{
		ARMCore armTest;
		DataChunk memory;
		memory.Allocate(file.GetBufferSize());
		memcpy(memory.mData,file.GetBuffer(),file.GetBufferSize());
		armTest.WriteMemory(0x10000,file.GetBuffer(),file.GetBufferSize());

		// Get the frame code offset from the loaded handler
		unsigned int offset;
		file >> offset;
		armTest.SetPC(0x80000000 | 0x10000 + offset);
		armTest.SetRegister(0,6);
		armTest.Execute();
	}
Example #2
Status
chunkDecode(DataChunk &result, const std::string &in)
{
    // The string must be a multiple of the chunk size:
    if (in.size() % Chars)
        return ABC_ERROR(ABC_CC_ParseError, "Bad encoding");

    DataChunk out;
    out.reserve(Bytes * (in.size() / Chars));

    constexpr unsigned shift = 8 * Bytes / Chars; // Bits per character
    uint16_t buffer = 0; // Bits waiting to be written out, MSB first
    int bits = 0; // Number of bits currently in the buffer
    auto i = in.begin();
    while (i != in.end())
    {
        // Read one character from the string:
        int value = Decode(*i);
        if (value < 0)
            break;
        ++i;

        // Append the bits to the buffer:
        buffer |= value << (16 - bits - shift);
        bits += shift;

        // Write out some bits if the buffer has a byte's worth:
        if (8 <= bits)
        {
            out.push_back(buffer >> 8);
            buffer <<= 8;
            bits -= 8;
        }
    }
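
Note: the decoder above relies on constants (Bytes, Chars, shift) and a Decode() character map defined elsewhere in its file, and the snippet stops just before the trailing-bits check. As a rough, self-contained sketch of the same MSB-first bit-buffer technique, here is the hex case (Bytes = 1, Chars = 2, shift = 4); the function name and types are illustrative only, not part of the original API.

#include <cstdint>
#include <string>
#include <vector>

// Hex decoder built on the same 16-bit MSB-first accumulator (4 bits per character).
static bool hexDecode(std::vector<std::uint8_t> &result, const std::string &in)
{
    if (in.size() % 2)                 // must be a whole number of two-character groups
        return false;

    std::vector<std::uint8_t> out;
    out.reserve(in.size() / 2);

    std::uint16_t buffer = 0;          // bits waiting to be written out, MSB first
    int bits = 0;                      // number of bits currently in the buffer
    for (char c : in)
    {
        // Read one character from the string:
        int value = ('0' <= c && c <= '9') ? c - '0' :
                    ('a' <= c && c <= 'f') ? c - 'a' + 10 :
                    ('A' <= c && c <= 'F') ? c - 'A' + 10 : -1;
        if (value < 0)
            return false;

        // Append the bits to the buffer:
        buffer |= static_cast<std::uint16_t>(value << (16 - bits - 4));
        bits += 4;

        // Write out a byte once the buffer holds one:
        if (8 <= bits)
        {
            out.push_back(static_cast<std::uint8_t>(buffer >> 8));
            buffer <<= 8;
            bits -= 8;
        }
    }

    result = std::move(out);
    return true;
}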
Example #3
void DataContainer::Allocate() {

	// Check that memory has not been allocated
	if (m_pAllocatedMemory != NULL) {
		_EXCEPTIONT("Attempting Allocate() on attached DataContainer");
	}

	// Allocate memory as one contiguous chunk
	size_t sTotalByteSize = GetTotalByteSize();

	m_pAllocatedMemory =
		reinterpret_cast<unsigned char*>(malloc(sTotalByteSize));

	if (m_pAllocatedMemory == NULL) {
		_EXCEPTIONT("Out of memory");
	}

	// Initialize allocated memory to zero
	memset(m_pAllocatedMemory, 0, sTotalByteSize);

	// Assign memory to DataChunks
	unsigned char * pAccumulated = m_pAllocatedMemory;

	for (size_t i = 0; i < m_vecDataChunks.size(); i++) {
		DataChunk * pDataChunk =
			reinterpret_cast<DataChunk*>(m_vecDataChunks[i]);

		pDataChunk->AttachToData(
			reinterpret_cast<void *>(pAccumulated));

#pragma message "Alignment may be an issue here on 32-bit systems"
		pAccumulated += pDataChunk->GetByteSize();
	}
}
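
Note: the idea here is a single contiguous allocation that is carved up and attached to the individual DataChunks. A minimal self-contained sketch of that pattern (not the DataContainer API; the class and member names are made up for illustration) could look like the following; as the #pragma above warns, per-slice alignment is not enforced here either.

#include <cstdlib>
#include <cstring>
#include <stdexcept>
#include <vector>

// Sketch: one contiguous block shared by several fixed-size arrays, handed out back to back.
class ContiguousArena {
public:
    // sizes[i] is the byte size of the i-th array
    explicit ContiguousArena(const std::vector<size_t> &sizes) {
        size_t total = 0;
        for (size_t s : sizes) total += s;

        m_memory = static_cast<unsigned char *>(std::malloc(total));
        if (m_memory == nullptr) throw std::runtime_error("Out of memory");
        std::memset(m_memory, 0, total);

        // Assign each array its slice of the block
        unsigned char *pAccumulated = m_memory;
        for (size_t s : sizes) {
            m_arrays.push_back(pAccumulated);
            pAccumulated += s;   // NOTE: no per-slice alignment handling, as in the snippet above
        }
    }
    ~ContiguousArena() { std::free(m_memory); }

    // Non-copyable: the arena owns its block
    ContiguousArena(const ContiguousArena &) = delete;
    ContiguousArena &operator=(const ContiguousArena &) = delete;

    void *array(size_t i) const { return m_arrays[i]; }

private:
    unsigned char *m_memory;
    std::vector<unsigned char *> m_arrays;
};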
Example #4
void wait_futures() {
    for (int i = 0; i < nbThreads; i++) {
        futures[i].wait();
        ASSERT_TRUE(sharedData.isReferencing(threadsBuffer));
        ASSERT_TRUE(futures[i].get() == sharedData.toInt());
    }
}
Example #5
void StreamDataTest::test_isValid()
{
    {
        // Use Case:
        // Stream Data invalid, no associated data
        // expect invalid
        StreamData sd("",(void*)0,10);
        CPPUNIT_ASSERT( ! sd.isValid() );
    }
    {
        // Use Case:
        // Stream Data valid, no associated data
        // expect valid
        StreamData sd("",(void*)1000,10);
        CPPUNIT_ASSERT( sd.isValid() );
    }
    {
        // Use Case:
        // Stream Data is valid, but associated data is not
        // expect invalid
        DataChunk d;
        {
            StreamData sd("",(void*)1000,10);
            CPPUNIT_ASSERT( ! d.isValid() );
            CPPUNIT_ASSERT( sd.isValid() );
            boost::shared_ptr<DataChunk> ld( new DataChunk("test", &d) );
            sd.addAssociatedData( ld );
            CPPUNIT_ASSERT( ! sd.isValid() );
        }
    }
}
Example #6
    static void* WorkerThreadLoop(void * workerThread) {

        WorkerThread* thread = static_cast<WorkerThread*>(workerThread);

        int sum = 0;

        clock_t t0 = clock();

        while(true) {
            // Get next task
            DataChunk data = thread->mDataSource->GetDataChunk();
            if(data.GetSize() == 0) break;

            // Execute task
            for(int i = 0; i < data.GetSize(); i++) {
                usleep(1000);
                sum += data[i];
            }
        }

        thread->mResultData->AddResult(sum);
        printf("%s: Thread is done:%d sum:%d time:%lu\n", __func__, thread->mTID, sum, clock() - t0);

        return NULL;
    }
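
Note: the loop keeps pulling chunks from thread->mDataSource until it receives an empty one. The data source itself is not part of the snippet; a plausible sketch of such a source (assumed, not the original class) is a mutex-protected queue whose GetDataChunk() returns an empty chunk once the work runs out:

#include <mutex>
#include <queue>
#include <vector>

// Assumed sketch of the shared work source: GetDataChunk() hands out the next chunk,
// or an empty one when no work is left (which is the loop's stop condition above).
class DataSource {
public:
    void AddChunk(std::vector<int> chunk) {
        std::lock_guard<std::mutex> lock(mMutex);
        mQueue.push(std::move(chunk));
    }

    std::vector<int> GetDataChunk() {
        std::lock_guard<std::mutex> lock(mMutex);
        if (mQueue.empty())
            return std::vector<int>();     // empty chunk == "no more work"
        std::vector<int> chunk = std::move(mQueue.front());
        mQueue.pop();
        return chunk;
    }

private:
    std::mutex mMutex;
    std::queue<std::vector<int>> mQueue;
};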
Example #7
/*! \param Offset Offset from the start of the KLV value from which to start reading
 *  \param Size Number of bytes to read, if -1 all available bytes will be read (which could be billions!)
 *  \return The number of bytes read
 *
 *  DRAGONS: This base function may be called from derived class objects to get base behaviour.
 *           It is therefore vital that the function does not call any "virtual" KLVObject
 *           functions, directly or indirectly.
 */
size_t KLVObject::Base_ReadDataFrom(DataChunk &Buffer, Position Offset, size_t Size /*=-1*/)
{
	// Delegate to ReadHandler if defined
	if(ReadHandler) return ReadHandler->ReadData(Buffer, this, Offset, Size);

	if(Source.Offset < 0)
	{
		error("Call to KLVObject::Base_ReadDataFrom() with no read handler defined and DataBase undefined\n");
		return 0;
	}

	if(!Source.File)
	{
		error("Call to KLVObject::Base_ReadDataFrom() with no read handler defined and source file not set\n");
		return 0;
	}

	// Initially plan to read all the bytes available
	Length BytesToRead = Source.OuterLength - Offset;

	// Limit to specified size if > 0 and if < available
	if( (Size > 0) && (Size < BytesToRead)) BytesToRead = Size;

	// Don't do anything if nothing to read
	if(BytesToRead <= 0) 
	{
		Buffer.Resize(0);
		return 0;
	}

	// Sanity check the size of this read
	if((sizeof(size_t) < 8) && (BytesToRead > 0xffffffff))
	{
		error("Tried to read > 4GBytes, but this platform can only handle <= 4GByte chunks\n");
		return 0;
	}

	// Seek to the start of the requested data
	Source.File->Seek(Source.Offset + Source.KLSize + Offset);

	// Resize the chunk
	// Discarding old data first (by setting Size to 0) prevents old data being 
	// copied needlessly if the buffer is reallocated to increase its size
	Buffer.Size = 0;
	Buffer.Resize(static_cast<size_t>(BytesToRead));

	// Read into the buffer (only as big as the buffer is!)
	size_t Bytes = Source.File->Read(Buffer.Data, Buffer.Size);

	// Resize the buffer if something odd happened (such as an early end-of-file)
	if(Bytes != static_cast<size_t>(BytesToRead)) Buffer.Resize(Bytes);

	return Bytes;
}
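
Note: stripped of the KLV bookkeeping, the core of this function is: clamp the request to what is available, seek, read, and shrink the buffer on a short read. For comparison, a standalone sketch of that pattern with standard-library types (not the mxflib API; the helper name is made up):

#include <cstdint>
#include <fstream>
#include <vector>

// Read up to `size` bytes starting at `offset`; shrink the buffer on a short read.
static size_t readRange(std::vector<unsigned char> &buffer,
                        std::ifstream &file,
                        std::uint64_t offset, size_t size)
{
	// Discard old data first so a growing resize does not copy it needlessly
	buffer.clear();
	buffer.resize(size);

	// Seek to the start of the requested data (clear any earlier EOF state first)
	file.clear();
	file.seekg(static_cast<std::streamoff>(offset));

	// Read into the buffer (only as much as was asked for)
	file.read(reinterpret_cast<char *>(buffer.data()),
	          static_cast<std::streamsize>(size));

	// Resize the buffer if something odd happened (such as an early end-of-file)
	size_t bytes = static_cast<size_t>(file.gcount());
	if (bytes != size)
		buffer.resize(bytes);

	return bytes;
}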
Example #8
Status
randomData(DataChunk &result, size_t size)
{
    DataChunk out;
    out.resize(size);

    if (!RAND_bytes(out.data(), out.size()))
        return ABC_ERROR(ABC_CC_Error, "Random data generation failed");

    result = std::move(out);
    return Status();
}
Example #9
DataChunk
buildData(std::initializer_list<DataSlice> slices)
{
    size_t size = 0;
    for (auto slice: slices)
        size += slice.size();

    DataChunk out;
    out.reserve(size);
    for (auto slice: slices)
        out.insert(out.end(), slice.begin(), slice.end());
    return out;
}
Example #10
/*-----------------------------------------------------------------------------
  Append data to the SSL/TLS stream and handle packet framing.
-----------------------------------------------------------------------------*/
void SSLStream::Append(const DataChunk& chunk) {
  if (process_data_) {
    size_t len = chunk.GetLength();
    const char * buff = chunk.GetData();
  
    while (len && buff) {
      size_t copy_bytes = 0;

      // are we starting a new frame?
      if (message_size_ < 0) {
        // see if we can at least copy over the size of the initial frame
        size_t needed = sizeof(SSL_HEADER) - message_len_;
        copy_bytes = min(needed, len);
        if (copy_bytes) {
          memcpy(&message_[message_len_], buff, copy_bytes);
          message_len_ += (int)copy_bytes;
          len -= copy_bytes;
          buff += copy_bytes;
        }

        // see if we have a header to parse and get the actual message size
        if (message_len_ >= sizeof(SSL_HEADER)) {
          SSL_HEADER * header = (SSL_HEADER *)message_;
          message_size_ = htons(header->record_length) + sizeof(SSL_HEADER);
        }
      }

      // see if we have bytes remaining in the current message
      if (message_size_ > 0 &&
          message_len_ < message_size_ &&
          len > 0 &&
          buff) {
        copy_bytes = min(message_size_ - message_len_, (__int32)len);
        memcpy(&message_[message_len_], buff, copy_bytes);
        message_len_ += (int)copy_bytes;
        len -= copy_bytes;
        buff += copy_bytes;
      }

      // see if we have a full message
      if (message_size_ == message_len_) {
        ProcessMessage();

        // reset state for the next message
        message_size_ = -1;
        message_len_ = 0;
      }
    }
  }
}
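
Note: the framing above depends on the TLS record header being 5 bytes on the wire: one content-type byte, two version bytes, and a big-endian 16-bit payload length, so a complete record occupies the header size plus that length. A minimal standalone sketch of that size calculation (the helper is illustrative, not part of SSLStream):

#include <cstddef>
#include <cstdint>

// TLS record layout on the wire: type (1 byte), version (2 bytes), length (2 bytes, big-endian).
// Given the 5 raw header bytes, return the total size of the record (header + payload).
static std::size_t tlsRecordTotalSize(const std::uint8_t header[5])
{
  // Assemble the big-endian 16-bit length by hand instead of relying on htons/ntohs.
  std::uint16_t record_length =
      static_cast<std::uint16_t>((header[3] << 8) | header[4]);
  return 5 + record_length;
}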
Example #11
TEST_F(testsDataChunk, DefaultConstructor) {
    DataChunk empty;
    EXPECT_TRUE(empty.isReferencing(NULL));
    EXPECT_TRUE(empty.getData() == NULL);
    EXPECT_TRUE(empty.getSize() == 0);
    DataChunk emptyRef;
    emptyRef.reference(empty);
    EXPECT_TRUE(emptyRef.isReferencing(NULL));
    EXPECT_TRUE(emptyRef.getData() == NULL);
    EXPECT_TRUE(emptyRef.getSize() == 0);
    EXPECT_TRUE(emptyRef == empty);
}
Example #12
Status
watcherBridgeRawTx(Wallet &self, const char *szTxID,
    DataChunk &result)
{
    Watcher *watcher = nullptr;
    ABC_CHECK(watcherFind(watcher, self));

    bc::hash_digest txid;
    if (!bc::decode_hash(txid, szTxID))
        return ABC_ERROR(ABC_CC_ParseError, "Bad txid");
    auto tx = watcher->find_tx(txid);
    result.resize(satoshi_raw_size(tx));
    bc::satoshi_save(tx, result.begin());

    return Status();
}
Example #13
void DataContainer::Detach() {

	for (size_t i = 0; i < m_vecDataChunks.size(); i++) {
		DataChunk * pDataChunk =
			reinterpret_cast<DataChunk *>(m_vecDataChunks[i]);

		pDataChunk->Detach();
	}

	if ((m_fOwnsData) && (m_pAllocatedMemory != NULL)) {
		free(m_pAllocatedMemory);
	}

	m_fOwnsData = true;
	m_pAllocatedMemory = NULL;
}
Example #14
namespace testDataChunk {
const char* threadsBuffer = "Threads";
const int nbThreads = 50;

int makeDataChunks(const int nbLoop, DataChunk& output) {
    int ret = 0;
    for (int i = 0; i < nbLoop; i++) {
        DataChunk chunk(threadsBuffer);
        ret = chunk.toInt();
        EXPECT_TRUE(chunk.isReferencing(threadsBuffer));
        output.reference(chunk);
    }
    return ret;
}

static std::promise<int> promises[nbThreads];
static std::future<int> futures[nbThreads];

DataChunk sharedData;

void detach_threads() {
    const int nbDataChunk = 1000;
    for (int i = 0; i < nbThreads; i++) {
        promises[i] = std::promise<int>();
        futures[i] = promises[i].get_future();
        std::thread([](std::promise<int>& p) {p.set_value(makeDataChunks(nbDataChunk,sharedData));},
                std::ref(promises[i])).detach();
    }
}

void wait_futures() {
    for (int i = 0; i < nbThreads; i++) {
        futures[i].wait();
        ASSERT_TRUE(sharedData.isReferencing(threadsBuffer));
        ASSERT_TRUE(futures[i].get() == sharedData.toInt());
    }
}

class KeyValueNode {
    public:
        KeyValueNode(const DataChunk&& key, const DataChunk&& data) :
                key(std::move(key)), data(std::move(data)) {
        }

        const DataChunk& getKey() {
            return key;
        }
        const DataChunk& getData() {
            return data;
        }
        void setData(const DataChunk& d) {
            data.reference(d);
        }
    private:
        DataChunk key;
        DataChunk data;
};

} // namespace testDataChunk
Example #15
size_t DataContainer::GetTotalByteSize() const {
	
	// Get the accumulated size of all DataChunks
	size_t sAccumulated = 0;

	for (size_t i = 0; i < m_vecDataChunks.size(); i++) {
		DataChunk * pDataChunk =
			reinterpret_cast<DataChunk*>(m_vecDataChunks[i]);

		// Verify byte alignment
		size_t sByteSize = pDataChunk->GetByteSize();
		if (sByteSize % sizeof(size_t) != 0) {
			_EXCEPTIONT("Misaligned array detected in DataContainer");
		}
		sAccumulated += sByteSize;
	}

	return sAccumulated;
}
Example #16
int makeDataChunks(const int nbLoop, DataChunk& output) {
    int ret = 0;
    for (int i = 0; i < nbLoop; i++) {
        DataChunk chunk(threadsBuffer);
        ret = chunk.toInt();
        EXPECT_TRUE(chunk.isReferencing(threadsBuffer));
        output.reference(chunk);
    }
    return ret;
}
Example #17
void Compressor::apply(DataChunk& samples){
    switch( samples.getFormat()->bitsPerSample()/8){
        case 2:
            maxint =32767;break;
        case 1:
            maxint = 127;
            break;
    }
    b = (1.0-a2)*maxint;
    DiskreteEffect::apply(samples);
}
Example #18
void DiskreteEffect::apply(DataChunk &samples){
    unsigned short bytePerSample=samples.getFormat()->bitsPerSample()/8;
    unsigned int count=samples.getSize()/bytePerSample; //samples count
    char *data8 = samples.getData();
    short *data16 = (short*)data8;

    switch (bytePerSample) {
        case 1:
            maxint=127;
            for(unsigned int i=0;i<count;i++)
                data8[i]=apply(data8[i]-0x80)+0x80;
            break;
        case 2:
            maxint=32767;
            for(unsigned int i=0;i<count;i++)
                data16[i]=apply(data16[i]);
            break;
        default:
            throw "Такой битрейт пока не поддерживается";
    }
}
Example #19
    void DirectXShader::compileShader(const std::shared_ptr<class DirectXRenderer>& renderer, DataChunk data, ID3DInclude* includes, const std::string& entryPoint, ShaderType type)
    {
        ComPtr<ID3D10Blob> errors;
        HRESULT hr = D3DCompile(data.data(), data.size(), "", NULL, includes, entryPoint.c_str(), getTarget(type), D3DCOMPILE_DEBUG | D3DCOMPILE_PREFER_FLOW_CONTROL | D3DCOMPILE_SKIP_OPTIMIZATION, 0, mBlob.getInitRef(), errors.getInitRef());
        if (hr != S_OK)
        {
            log("%s", errors->GetBufferPointer());
            runtimeCheck(false);
        }

        switch (type)
        {
        case ShaderType::Vertex:
            hr = renderer->getDevice()->CreateVertexShader(mBlob->GetBufferPointer(), mBlob->GetBufferSize(), nullptr, mVertexShader.getInitRef());
            TB::runtimeCheck(hr == S_OK);
            break;
        case ShaderType::Pixel:
            hr = renderer->getDevice()->CreatePixelShader(mBlob->GetBufferPointer(), mBlob->GetBufferSize(), nullptr, mPixelShader.getInitRef());
            TB::runtimeCheck(hr == S_OK);
            break;
        }

    }
Example #20
void DataContainer::AttachTo(
	unsigned char * pAllocatedMemory
) {
	if (m_pAllocatedMemory != NULL) {
		_EXCEPTIONT("Attempting AttachTo() on attached DataContainer");
	}

	m_fOwnsData = false;
	m_pAllocatedMemory = pAllocatedMemory;

	// Assign memory to DataChunks
	unsigned char * pAccumulated = m_pAllocatedMemory;

	for (size_t i = 0; i < m_vecDataChunks.size(); i++) {

		DataChunk * pDataChunk =
			reinterpret_cast<DataChunk *>(m_vecDataChunks[i]);
		pDataChunk->AttachToData(
			reinterpret_cast<void *>(pAccumulated));

		pAccumulated += pDataChunk->GetByteSize();
	}
}
Example #21
int ld_main(int argc, char **argv) {
  bool showHelp(false), mustResolveRefs(true);
  string outFileName("a.out.bin"), archString("8w32/32/8"), formatString("bin");
  Size nObjects;
  Addr binOffset(0);

  /* Get command line arguments. */
  CommandLineArgFlag          fh("-h", "--help", "", showHelp);
  CommandLineArgSetter<string>fa("-a", "--arch", "", archString);
  CommandLineArgSetter<string>ff("-f", "--format", "", formatString);
  CommandLineArgSetter<Addr>  foffset("--offset", "", binOffset);
  CommandLineArgSetter<string>fo("-o", "--output", "", outFileName);

  int firstInput(0), newArgc(argc);
  for (int i = 0; i < argc; i++) {
    if (*(argv[i]) != '-') { firstInput = i; newArgc = i; break; }
    else if (string(argv[i]) == "--") { firstInput = i+1; newArgc = i; break; }
    else i++; /* Skip both the switch and its argument. */
  }
  nObjects = argc - firstInput;

  if (argc != 0) CommandLineArg::readArgs(newArgc, argv);

  if (argc == 0 || showHelp) {
    cout << Help::ldHelp;
    exit(0);
  }

  if (firstInput == argc) {
    cout << "Linker: no input files given.\n";
    exit(1);
  }

  ArchDef arch(archString);

  /* Read all of the objects, assign addresses to their chunks, and place them
     in an address map.*/
  vector<Obj *> objects(nObjects);
  vector<DataChunk*> chunks;
  map<string, Addr> gChunkMap;
  Addr nextOffset(binOffset);

  for (Size i = 0; i < nObjects; i++) {
    map <string, Addr> lChunkMap;

    /* Read the object. */
    HOFReader hr(arch);
    ifstream objFile(argv[firstInput + i]);
    if (!objFile) {
      cout << "Could not open \"" << argv[firstInput + i] 
           << "\" for reading.\n";
      exit(1);
    }
    objects[i] = hr.read(objFile);

    /* Assign addresses to chunks. */
    Obj &obj = *objects[i];
    for (Size j = 0; j < obj.chunks.size(); j++) {
      DataChunk *c = dynamic_cast<DataChunk*>(obj.chunks[j]);
      if (c->alignment != 0 && nextOffset % c->alignment)
        nextOffset += c->alignment - (nextOffset % c->alignment);
      c->bind(nextOffset);
      chunks.push_back(c);
      if (obj.chunks[j]->name != "") {
        if (c->isGlobal()) gChunkMap[c->name] = nextOffset;
        else               lChunkMap[c->name] = nextOffset;
      }
      nextOffset += (c->size);
    }

    /* Resolve local references. */
    for (Size i = 0; i < obj.chunks.size(); i++) {
      DataChunk *dc = dynamic_cast<DataChunk*>(obj.chunks[i]);
      for (Size j = 0; j < dc->refs.size(); j++) {
        Ref &ref = *(dc->refs[j]);
        if (lChunkMap.find(dc->refs[j]->name) != lChunkMap.end()) {
          dc->refs[j]->bind(lChunkMap[dc->refs[j]->name],
                            dc->address + dc->refs[j]->ibase);
        }
      }
    }
  }

  /* Resolve references. */
  for (Size i = 0; i < chunks.size(); i++) {
    DataChunk *dc = chunks[i];
    for (Size j = 0; j < dc->refs.size(); j++) {
      Ref &ref = *(dc->refs[j]);
      if (!ref.bound && (gChunkMap.find(ref.name) != gChunkMap.end())) {
        ref.bind(gChunkMap[ref.name], dc->address + ref.ibase);
      } else if (!ref.bound && mustResolveRefs) {
        cout << "Undefined symbol: \"" << ref.name << "\"\n";
        exit(1);
      }
    }
  }  

  /* Write out the chunks. */
  ofstream outFile(outFileName.c_str());
  for (Size i = 0; i < chunks.size(); i++) {
    if (outFile.tellp() > chunks[i]->address - binOffset) {
      cout << "Linker internal error. Wrote past next chunk address.\n";
      exit(1);
    }
    while (outFile.tellp() < chunks[i]->address - binOffset) outFile.put('\0');
    outFile.seekp(chunks[i]->address - binOffset);
    outFile.write((char*)&chunks[i]->contents[0], chunks[i]->contents.size());
  }

  /* Clean up. */
  for (Size i = 0; i < nObjects; i++) delete objects[i];

  return 0;
}
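
Note: one detail worth calling out is the alignment bump performed before c->bind(nextOffset): the offset is rounded up to the next multiple of the chunk's alignment. A small self-contained helper expressing the same computation, with a few sanity checks (names are illustrative):

#include <cassert>
#include <cstdint>

/* Round `offset` up to the next multiple of `alignment`; an alignment of 0 means
   "no constraint", exactly as in the chunk-placement loop above. */
static std::uint64_t alignUp(std::uint64_t offset, std::uint64_t alignment) {
  if (alignment != 0 && offset % alignment)
    offset += alignment - (offset % alignment);
  return offset;
}

int main() {
  assert(alignUp(0x103, 0x10) == 0x110); /* bumped to the next 16-byte boundary */
  assert(alignUp(0x110, 0x10) == 0x110); /* already aligned: unchanged */
  assert(alignUp(0x123, 0)    == 0x123); /* no alignment requested */
  return 0;
}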
Example #22
/*! \param Buffer Pointer to data to be written
 *  \param Offset The offset within the KLV value field of the first byte to write
 *  \param Size The number of bytes to write
 *  \return Size if all OK, else != Size.  This may not equal the actual number of bytes written.
 *	The IV must have already been set.
 *  Only encrypted parts of the value may be written using this function (i.e. Offset >= PlaintextOffset)
 */
size_t KLVEObject::WriteCryptoDataTo(const UInt8 *Buffer, Position Offset, size_t Size)
{
	// Self-deleting store to allow us to extend working buffer if required
	DataChunk TempData;

	// Are we going to need to write padding bytes?
	bool AddPadding;
	if(static_cast<Length>(Offset + Size) >= ValueLength) AddPadding = true; else AddPadding = false;

	// Check if this is a "pointless" zero-byte write (rather than a request to write padding only)
	if((!AddPadding) && (Size == 0)) return 0;

	// Assume that the write will succeed and move the "next" pointer accordingly
	CurrentWriteOffset += Size;

	// Check if all the bytes will fit in the AwaitingEncryptionBuffer
	if((!AddPadding) && (Size < static_cast<size_t>(EncryptionGranularity - AwaitingEncryption)))
	{
		// Add to the end of the waiting buffer
		memcpy(&AwaitingEncryptionBuffer[AwaitingEncryption], Data.Data, Size);

		// All done
		return Size;
	}

	// Work out how many bytes we need to encrypt (estimate one - as many as offered)
	Length BytesToEncrypt = Size;

	// If there are any bytes waiting they need to be added to this write
	if(AwaitingEncryption)
	{
		// Build the full data in the temporary buffer
		TempData.ResizeBuffer(AwaitingEncryption + Size);

		// Start with "waiting" data
		TempData.Set(AwaitingEncryption, AwaitingEncryptionBuffer);

		// Copy in the new data
		TempData.Append(Size, Buffer);

		// Replace the working buffer pointer with a pointer to the temporary buffer
		Buffer = TempData.Data;

		// Update the offset (move it back to the first waiting byte)
		Offset -= AwaitingEncryption;

		// Record the revised size
		BytesToEncrypt = AwaitingEncryption + Size;
	}

	// Pad the data if required (i.e. if this is the last chunk of data)
	if(AddPadding)
	{
		if(Offset + BytesToEncrypt > ValueLength)
		{
			error("Attempted to write beyond the end of the encrypted value in KLVEObject::WriteCryptoDataTo()\n");
			return 0;
		}

		// Start by encrypting all but the last 16 bytes (including padding)
		Length StartSize = EncryptedLength - (EncryptionGranularity + Offset);

		// Sanity check the size of this chunk
		if((sizeof(size_t) < 8) && (StartSize > 0xffffffff))
		{
			error("Tried to encrypt > 4GBytes, but this platform can only handle <= 4GByte chunks\n");
			return 0;
		}

		// Don't write zero bytes
		if(StartSize)
		{
			// Encrypt by making a copy
			DataChunkPtr NewData = Encrypt->Encrypt(static_cast<size_t>(StartSize), Buffer);
			if(!NewData) return 0;

			// Write the encrypted data
			Base_WriteDataTo(NewData->Data, DataOffset + Offset, static_cast<size_t>(StartSize));

			// Update the current hash if we are calculating one
			// TODO: Sort the possible overflow here
			if(WriteHasher) WriteHasher->HashData(static_cast<size_t>(StartSize), NewData->Data);
		}

		// Buffer for last data to be encrypted
		UInt8 TempBuffer[EncryptionGranularity];

		// Copy in the remaining bytes from the end of the given buffer
		// Add padding bytes in a 16-byte version of the scheme defined in RFC 2898
		const UInt8 *pSrc = &Buffer[StartSize];
		UInt8 *pDst = TempBuffer;
		int i;
		int EncData = (int)(BytesToEncrypt - StartSize);
		int Pad = EncryptionGranularity - EncData;
		for(i=0; i<16; i++)
		{
			if(i < EncData) *(pDst++) = *(pSrc++);
			else *(pDst++) = (UInt8)Pad;
		}

		// Encrypt by making a copy
		DataChunkPtr NewData = Encrypt->Encrypt(EncryptionGranularity, TempBuffer);
		if(!NewData) return 0;

		// Write the encrypted data
		Base_WriteDataTo(NewData->Data, DataOffset + Offset + StartSize, EncryptionGranularity);

		// Update the current hash if we are calculating one
		if(WriteHasher) WriteHasher->HashData(EncryptionGranularity, NewData->Data);

		// There are no more bytes to encrypt
		AwaitingEncryption = 0;

		// Lie and say we wrote the requested number of bytes (tells the caller all was fine)
		return Size;
	}

	// Work out how many bytes have to be encrypted this time (an integer number of chunks)
	Length BytesRequiringEncryption = BytesToEncrypt;
	BytesToEncrypt = BytesToEncrypt / EncryptionGranularity;
	BytesToEncrypt *= EncryptionGranularity;

	// Sanity check the size of this chunk
	if((sizeof(size_t) < 8) && (BytesToEncrypt > 0xffffffff))
	{
		error("Tried to encrypt > 4GBytes, but this platform can only handle <= 4GByte chunks\n");
		return 0;
	}

	// Any difference will be "left-over"
	AwaitingEncryption = static_cast<int>(BytesRequiringEncryption - BytesToEncrypt);

	// Encrypt by making a copy
	DataChunkPtr NewData = Encrypt->Encrypt(static_cast<size_t>(BytesToEncrypt), Buffer);
	if(!NewData) return 0;

	// If there will be any "left-over" bytes they will be awaiting next time
	if(AwaitingEncryption)
	{
		memcpy(AwaitingEncryptionBuffer, &Buffer[BytesToEncrypt], AwaitingEncryption);
	}

	// Write the encrypted data
	Size = Base_WriteDataTo(NewData->Data, DataOffset + Offset, static_cast<size_t>(BytesToEncrypt));

	// Update the current hash if we are calculating one
	// TODO: Sort the possible overflow here
	if(WriteHasher) WriteHasher->HashData(Size, NewData->Data);

	// Chain the IV for next time...
	EncryptionIV = Encrypt->GetIV();

	// Lie and say we wrote the requested number of bytes (tells the caller all was fine)
	return Size;
}
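
Note: the final-block path copies the last few plaintext bytes and fills the rest of the 16-byte block with pad bytes whose value equals the pad length, i.e. the "16-byte version of the scheme defined in RFC 2898" mentioned in the comment (the same rule as PKCS#7 padding). A minimal standalone sketch of that padding rule, separate from the KLVEObject code path:

#include <cstddef>
#include <cstdint>
#include <vector>

// Pad `data` out to a multiple of blockSize; every pad byte holds the pad length,
// and a full block of padding is added when the data is already block-aligned.
static std::vector<std::uint8_t> padBlock(std::vector<std::uint8_t> data,
                                          std::size_t blockSize = 16)
{
	std::size_t pad = blockSize - (data.size() % blockSize);
	data.insert(data.end(), pad, static_cast<std::uint8_t>(pad));
	return data;
}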
Example #23
	bool BoneAnimationSet::write(string filename,float fCopress)
	{
		//filename = m_strName+"1";
		Stream *pDataStream;
		DataChunk writeChunk;

		// The latest file version is 7; convert all older versions to version 4
		// Write 'MVER'
		writeChunk.beginChunk('MVER', &pDataStream);
		uint ver = 4; // the AABB is computed in the export plugin
		pDataStream->write(&ver,sizeof(ver));
		writeChunk.endChunk();

		// Write 'MANM'
		uint32 nAnims = m_vAnimations.size();
		// Added by yhc 2010.5.10
		// Fix the case where one animation's end time equals the next animation's start time
		bool bFixWrongTime = false;
		if( nAnims > 0)
		{
			if( nAnims>1 && m_vAnimations[0]->getTimeEnd()==m_vAnimations[1]->getTimeStart() )
				bFixWrongTime = true;

			writeChunk.beginChunk('MANM', &pDataStream);
			pDataStream->write( &nAnims, sizeof(nAnims) );
			for( uint i =0; i < nAnims; i++)
			{
				string animname = m_vAnimations[i]->getName();
				uchar animnameLen = animname.size();
				pDataStream->write(&animnameLen, sizeof( animnameLen) );
				pDataStream->write(animname.c_str(), animnameLen);
				uint startTime,endTime;
				startTime = m_vAnimations[i]->getTimeStart();
				if(bFixWrongTime)
					startTime += i*33;

				endTime = m_vAnimations[i]->getTimeEnd();
				if(bFixWrongTime)
					endTime += i*33;
				pDataStream->write(&startTime, sizeof(startTime));
				pDataStream->write(&endTime, sizeof(endTime));
			}
			writeChunk.endChunk();
		}

		// Write 'MBON'
		uint nBones = m_vBones.size();
		if( nBones > 0)
		{
			writeChunk.beginChunk('MBON', &pDataStream);
			pDataStream->write( &nBones, sizeof(nBones) );
			for( uint i = 0; i < nBones; i++)
			{
				int id  = m_vBones[i]->objectId;
				pDataStream->write( &id, sizeof(id));
				string Jointname = m_vBones[i]->name;
				uchar JointnameLen = Jointname.size();
				pDataStream->write( &JointnameLen, sizeof(JointnameLen) );
				pDataStream->write( Jointname.c_str(), JointnameLen);
				int parent = m_vBones[i]->parentId;
				pDataStream->write( &parent, sizeof(parent) );
				Vector3 pivot = m_vBones[i]->pivotPoint;
				pDataStream->write( &pivot, sizeof(pivot) );

				// Write the bone's initial transform
				pDataStream->write(m_vBones[i]->initTrans.val,sizeof(float)*3);
				float _r[4];
				_r[0] = m_vBones[i]->initQuat.x;
				_r[1] = m_vBones[i]->initQuat.y;
				_r[2] = m_vBones[i]->initQuat.z;
				_r[3] = m_vBones[i]->initQuat.w;
				pDataStream->write(_r, sizeof(float)*4);
				pDataStream->write(m_vBones[i]->initScale.val, sizeof(float)*3);

				KeyFrames<Vector3> translation;
				KeyFrames<Quaternion> rotation;
				KeyFrames<Vector3> scale;
				
				// Drop duplicate and redundant keyframes
				CompressPos(m_vBones[i]->translation,translation,fCopress);
				uint nKeyFrames = translation.numKeyFrames();
				// Fix up the keyframe times
				if(bFixWrongTime)
				{
					int nCurAniIndex=0;
					for( uint ii = 0; ii < nKeyFrames; ii++)
					{
						KeyFrame<Vector3> * kf =  translation.getKeyFrame(ii);
						if( kf->time>m_vAnimations[nCurAniIndex]->getTimeEnd() )
							nCurAniIndex++;
						kf->time += nCurAniIndex*33;
					}

				}
				pDataStream->write( &nKeyFrames, sizeof(nKeyFrames) );
				for( uint ii = 0; ii < nKeyFrames; ii++)
				{
					ModelKeyframeTranslation mkft;
					mkft.time = translation.getKeyFrame(ii)->time;
					mkft.v[0] = translation.getKeyFrame(ii)->v[0];
					mkft.v[1] = translation.getKeyFrame(ii)->v[1];
					mkft.v[2] = translation.getKeyFrame(ii)->v[2];
					pDataStream->write( &mkft, sizeof(mkft) );
				} 
				
				CompressQua(m_vBones[i]->rotation,rotation,fCopress);
				nKeyFrames = rotation.numKeyFrames();
				// Fix up the keyframe times
				if(bFixWrongTime)
				{
					int nCurAniIndex=0;
					for( uint ii = 0; ii < nKeyFrames; ii++)
					{
						KeyFrame<Quaternion> * kf =  rotation.getKeyFrame(ii);
						if( kf->time>m_vAnimations[nCurAniIndex]->getTimeEnd() )
							nCurAniIndex++;
						kf->time += nCurAniIndex*33;
					}
				}

				pDataStream->write( &nKeyFrames, sizeof(nKeyFrames) );
				for( uint ii = 0; ii < nKeyFrames; ii++)
				{
					ModelKeyframeRotation mkfr;
					mkfr.time = rotation.getKeyFrame(ii)->time;
					mkfr.q[0] = rotation.getKeyFrame(ii)->v.x;
					mkfr.q[1] = rotation.getKeyFrame(ii)->v.y;
					mkfr.q[2] = rotation.getKeyFrame(ii)->v.z;
					mkfr.q[3] = rotation.getKeyFrame(ii)->v.w;
					pDataStream->write( &mkfr, sizeof(mkfr) );
				} 
				
				CompressPos(m_vBones[i]->scale,scale,fCopress);
				nKeyFrames = scale.numKeyFrames();
				// Fix up the keyframe times
				if(bFixWrongTime)
				{
					int nCurAniIndex=0;
					for( uint ii = 0; ii < nKeyFrames; ii++)
					{
						KeyFrame<Vector3> * kf =  scale.getKeyFrame(ii);
						if( kf->time>m_vAnimations[nCurAniIndex]->getTimeEnd() )
							nCurAniIndex++;
						kf->time += nCurAniIndex*33;
					}

				}
				pDataStream->write( &nKeyFrames, sizeof( nKeyFrames) );
				for( uint ii = 0; ii < nKeyFrames; ii ++)
				{
					ModelKeyframeScale mkfs;
					mkfs.time = scale.getKeyFrame(ii)->time;
					mkfs.v[0] = scale.getKeyFrame(ii)->v[0];
					mkfs.v[1] = scale.getKeyFrame(ii)->v[1];
					mkfs.v[2] = scale.getKeyFrame(ii)->v[2];
					pDataStream->write( &mkfs, sizeof(mkfs) );
				}
			
			}
			writeChunk.endChunk();		
		}

		writeChunk.save(filename.c_str());
		return true;
	}
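
Note: the writer relies on the beginChunk()/endChunk() pair to emit FourCC-tagged chunks ('MVER', 'MANM', 'MBON'), with endChunk() presumably backpatching the chunk size once the payload has been written. A rough self-contained sketch of that RIFF-style pattern over std::ostream (names assumed, not the engine's Stream/DataChunk API):

#include <cstdint>
#include <ostream>

// Begin a chunk: write the FourCC tag plus a placeholder size and remember where the size goes.
static std::streampos beginChunk(std::ostream &out, std::uint32_t fourcc)
{
	out.write(reinterpret_cast<const char *>(&fourcc), sizeof(fourcc));
	std::streampos sizePos = out.tellp();
	std::uint32_t placeholder = 0;
	out.write(reinterpret_cast<const char *>(&placeholder), sizeof(placeholder));
	return sizePos;
}

// End a chunk: backpatch the real payload size, then return to the end of the stream.
static void endChunk(std::ostream &out, std::streampos sizePos)
{
	std::streampos end = out.tellp();
	std::streamoff payload = (end - sizePos) - static_cast<std::streamoff>(sizeof(std::uint32_t));
	out.seekp(sizePos);
	std::uint32_t size = static_cast<std::uint32_t>(payload);
	out.write(reinterpret_cast<const char *>(&size), sizeof(size));
	out.seekp(end);
}

// Usage sketch:
//   std::ofstream out("anim.bin", std::ios::binary);
//   std::streampos p = beginChunk(out, 'MVER');
//   std::uint32_t ver = 4;
//   out.write(reinterpret_cast<const char *>(&ver), sizeof(ver));
//   endChunk(out, p);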