Example #1
Sha1::Digest Sha1::digest() {
    /* Add '1' bit to the leftovers, pad to (n*64)+56 bytes */
    _buffer.append(1, 0x80);
    _buffer.append((_buffer.size() > 56 ? 120 : 56)- _buffer.size(), 0);

    /* Add size of data in bits in big endian */
    unsigned long long dataSizeBigEndian = Endianness::bigEndian<unsigned long long>(_dataSize*8);
    _buffer.append(reinterpret_cast<const char*>(&dataSizeBigEndian), 8);

    /* Process remaining chunks */
    for(std::size_t i = 0; i != _buffer.size()/64; ++i)
        processChunk(_buffer.data()+i*64);

    /* Convert digest from big endian */
    unsigned int digest[5];
    for(int i = 0; i != 5; ++i)
        digest[i] = Endianness::bigEndian<unsigned int>(_digest[i]);
    Digest d = Digest::fromByteArray(reinterpret_cast<const char*>(digest));

    /* Clear data and return */
    std::copy(initialDigest, initialDigest+5, _digest);
    _buffer.clear();
    _dataSize = 0;
    return d;
}
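Together with Example #6 below, this gives Sha1 a streaming interface: data is fed in pieces of arbitrary size through operator<<, and digest() pads the final chunk, appends the bit length, and resets the internal state. A minimal caller sketch, assuming a default-constructible Sha1 and the same headers as the excerpt (the constructor is not shown on this page):

// Hypothetical driver around the two member functions shown in this listing.
Sha1::Digest hashMessage(const std::string& part1, const std::string& part2) {
    Sha1 sha;               // assumed: default constructor sets the initial digest state
    sha << part1 << part2;  // Example #6: buffers and processes 64-byte chunks
    return sha.digest();    // Example #1: pad, append bit length, emit digest, reset
}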
Example #2
string numberToWords(int num) {
    if (num == 0) return "Zero";
    init();

    // Split the number into groups of three digits, least significant first.
    vector<int> chunks;
    int tmp = num;
    while (tmp) {
        chunks.push_back(tmp % 1000);
        tmp /= 1000;
    }

    // Spell out each non-zero group, followed by its scale word from bases[].
    vector<string> words;
    for (int i = chunks.size() - 1; i >= 0; --i) {
        vector<string> chunkWords = processChunk(chunks[i]);
        if (chunkWords.empty())
            continue;
        for (const auto& s : chunkWords)
            words.push_back(s);
        words.push_back(bases[i]);
    }

    // Join with single spaces, skipping empty scale words.
    string ans;
    for (const auto& s : words)
        if (!s.empty())
            ans += s + ' ';
    ans.resize(ans.size() - 1);  // drop the trailing space
    return ans;
}
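numberToWords() above leans on helpers that the listing does not show: init() fills the lookup tables, bases[] holds the scale words ("", "Thousand", "Million", "Billion"), and processChunk(int) spells out one group of up to three digits. A possible sketch of that helper, with hypothetical table names standing in for whatever init() builds, assuming the same using-declarations as the example:

// Hypothetical lookup tables; the real ones are presumably built by init().
const string ones[] = {"", "One", "Two", "Three", "Four", "Five", "Six", "Seven",
    "Eight", "Nine", "Ten", "Eleven", "Twelve", "Thirteen", "Fourteen", "Fifteen",
    "Sixteen", "Seventeen", "Eighteen", "Nineteen"};
const string tens[] = {"", "", "Twenty", "Thirty", "Forty", "Fifty", "Sixty",
    "Seventy", "Eighty", "Ninety"};

// Spell out a single 0-999 group; an empty result tells the caller to skip
// the group and its scale word entirely.
vector<string> processChunk(int chunk) {
    vector<string> words;
    if (chunk >= 100) {
        words.push_back(ones[chunk / 100]);
        words.push_back("Hundred");
        chunk %= 100;
    }
    if (chunk >= 20) {
        words.push_back(tens[chunk / 10]);
        chunk %= 10;
    }
    if (chunk > 0)
        words.push_back(ones[chunk]);
    return words;
}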
Example #3
void SHA256::finalize(void *hash, size_t len)
{
    // Pad the last chunk.  We may need two padding chunks if there
    // isn't enough room in the first for the padding and length.
    uint8_t *wbytes = (uint8_t *)state.w;
    if (state.chunkSize <= (64 - 9)) {
        wbytes[state.chunkSize] = 0x80;
        memset(wbytes + state.chunkSize + 1, 0x00, 64 - 8 - (state.chunkSize + 1));
        state.w[14] = htobe32((uint32_t)(state.length >> 32));
        state.w[15] = htobe32((uint32_t)state.length);
        processChunk();
    } else {
        // No room for the 64-bit length in this chunk: pad it out, process it,
        // then put the length at the end of a second, zero-filled chunk.
        wbytes[state.chunkSize] = 0x80;
        memset(wbytes + state.chunkSize + 1, 0x00, 64 - (state.chunkSize + 1));
        processChunk();
        memset(wbytes, 0x00, 64 - 8);
        state.w[14] = htobe32((uint32_t)(state.length >> 32));
        state.w[15] = htobe32((uint32_t)state.length);
        processChunk();
    }
    // ... copy the digest into "hash" (omitted in the source listing) ...
}
Example #4
/** Writing thread.
 *
 * \param handle the stream to use the filters on
 * \return 1 if the whole buffer chain has been processed, <1 otherwise
 */
static void*
bufferedWriterThread(void* handle)
{
  int allsent = 1;
  BufferedWriter* self = (BufferedWriter*)handle;
  BufferChunk* chunk = self->firstChunk;

  while (self->active) {
    oml_lock(&self->lock, __FUNCTION__);
    pthread_cond_wait(&self->semaphore, &self->lock);
    // Process all chunks which have data in them
    do {
      oml_unlock(&self->lock, __FUNCTION__);
      allsent = processChunk(self, chunk);

      oml_lock(&self->lock, __FUNCTION__);
      /* Stop if we caught up to the writer... */
      if (chunk == self->writerChunk) { break; }
      /* ...otherwise, move on to the next chunk */
      if (allsent>0) {
        chunk = getNextReadChunk(self);
      }
    } while(allsent > 0);
    oml_unlock(&self->lock, __FUNCTION__);
  }
  /* Drain this writer before terminating */
  /* XXX: “Backing-off for ...” messages might confuse the user as
   * we don't actually wait after a failure when draining at the end */
  while ((allsent=processChunk(self, chunk))>=-1) {
    if(allsent>0) {
      if (chunk == self->writerChunk) { break; };

      oml_lock(&self->lock, __FUNCTION__);
      chunk = getNextReadChunk(self);
      oml_unlock(&self->lock, __FUNCTION__);
    }
  };
  self->retval = allsent;
  pthread_exit(&(self->retval));
}
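bufferedWriterThread() is the consumer half of a condition-variable handoff: a producer fills chunks, advances self->writerChunk and signals self->semaphore, and this thread drains chunks until it catches up with the writer. A hedged sketch of what the producer side might look like, reusing only the field names and locking helpers visible in the excerpt (appendToChunk() is a made-up placeholder, not part of the shown API):

/** Hypothetical producer: queue data on the current write chunk and wake the
 * draining thread blocked in pthread_cond_wait() above.
 */
static void
bufferedWriterPush(BufferedWriter* self, const void* data, size_t len)
{
  oml_lock(&self->lock, __FUNCTION__);
  appendToChunk(self->writerChunk, data, len);  /* placeholder helper */
  pthread_cond_signal(&self->semaphore);
  oml_unlock(&self->lock, __FUNCTION__);
}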
Example #5
/** Execute the algorithm.
 */
void LoadEventAndCompress::exec() {
  std::string filename = getPropertyValue("Filename");
  double filterBadPulses = getProperty("FilterBadPulses");

  m_chunkingTable = determineChunk(filename);

  Progress progress(this, 0, 1, 2);

  // first run is free
  progress.report("Loading Chunk");
  MatrixWorkspace_sptr resultWS = loadChunk(0);
  progress.report("Process Chunk");
  resultWS = processChunk(resultWS, filterBadPulses);

  // load the other chunks
  const size_t numRows = m_chunkingTable->rowCount();

  progress.resetNumSteps(numRows, 0, 1);

  for (size_t i = 1; i < numRows; ++i) {
    MatrixWorkspace_sptr temp = loadChunk(i);
    temp = processChunk(temp, filterBadPulses);
    auto plusAlg = createChildAlgorithm("Plus");
    plusAlg->setProperty("LHSWorkspace", resultWS);
    plusAlg->setProperty("RHSWorkspace", temp);
    plusAlg->setProperty("OutputWorkspace", resultWS);
    plusAlg->setProperty("ClearRHSWorkspace", true);
    plusAlg->executeAsChildAlg();
    resultWS = plusAlg->getProperty("OutputWorkspace");

    progress.report();
  }
  Workspace_sptr total = assemble(resultWS);

  // Don't bother compressing combined workspace. DetermineChunking is designed
  // to prefer loading full banks so no further savings should be available.

  setProperty("OutputWorkspace", total);
}
Example #6
Sha1& Sha1::operator<<(const std::string& data) {
    std::size_t offset = 0;

    /* Process leftovers */
    if(!_buffer.empty()) {
        /* Not large enough yet, try it next time */
        if(data.size() + _buffer.size() < 64) {
            _buffer.append(data);
            _dataSize += data.size();
            return *this;
        }

        /* Fill the leftover buffer to 64 bytes, process it and clear it */
        offset = 64 - _buffer.size();
        _buffer.append(data.substr(0, offset));
        processChunk(_buffer.data());
        _buffer.clear();
    }

    /* Process whole 512-bit chunks of the remaining data */
    for(; offset + 64 <= data.size(); offset += 64)
        processChunk(data.data() + offset);

    /* Save last unfinished 512-bit chunk of data */
    _buffer = data.substr(offset);

    _dataSize += data.size();
    return *this;
}
Example #7
//template <class UserData, class Base>
//bool ChunkReader<UserData,Base>::process(flowvr::Message msg, int iter)
bool ChunkReader::process(flowvr::Message msg, int iter)
{
  if (iter<0) ++iteration;
  else iteration = iter;
  ChunkIterator it = chunkBegin(msg);
  ChunkIterator end = chunkEnd(msg);
  bool res = true;
  while (it != end)
  {
    if (!processChunk((MsgChunk<Chunk>)it))
      res = false;
    it++;
  }
  return res;
}
Example #8
void processReceive(ENetEvent& evt) {
	switch (evt.channelID) {
	case narf::net::CHAN_CHAT:
		processChat(evt);
		break;
	case narf::net::CHAN_PLAYERCMD:
		processPlayerCmd(evt);
		break;
	case narf::net::CHAN_CHUNK:
		processChunk(evt);
		break;
	case narf::net::CHAN_ENTITY:
		processEntity(evt);
		break;
	}
}
Example #9
bool PlaybackFile::parseHeader() {
	PlaybackFileHeader result;
	ChunkHeader nextChunk;
	_playbackParseState = kFileStateCheckFormat;
	if (!readChunkHeader(nextChunk)) {
		_playbackParseState = kFileStateError;
		return false;
	}
	while ((_playbackParseState != kFileStateDone) && (_playbackParseState != kFileStateError)) {
		if (processChunk(nextChunk)) {
			if (!readChunkHeader(nextChunk)) {
				warning("Error in header parsing");
				_playbackParseState = kFileStateError;
			}
		}
	}
	return _playbackParseState == kFileStateDone;
}
Example #10
//template <class UserData, class Base>
//bool ChunkReader<UserData,Base>::processChunk(const MsgChunk<Chunk>& data)
bool ChunkReader::processChunk(const MsgChunk<Chunk>& data)
{

  if (data.getSize()==0) return ERRMSG("Empty chunk");

  switch (data->type)
  {

  case Chunk::INVALID:
  {
    return ERRMSG("Unknown chunk type "<<data->type);

  }

  default:
    MsgChunk<Chunk> c(data);  // discover what should be here
    return processChunk(c);
  }

}
Example #11
void SHA256::update(const void *data, size_t len)
{
    // Update the total length (in bits, not bytes).
    state.length += ((uint64_t)len) << 3;

    // Break the input up into 512-bit chunks and process each in turn.
    const uint8_t *d = (const uint8_t *)data;
    while (len > 0) {
        uint8_t size = 64 - state.chunkSize;
        if (size > len)
            size = len;
        memcpy(((uint8_t *)state.w) + state.chunkSize, d, size);
        state.chunkSize += size;
        len -= size;
        d += size;
        if (state.chunkSize == 64) {
            processChunk();
            state.chunkSize = 0;
        }
    }
}
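SHA256::update() (Example #11) and SHA256::finalize() (Example #3) are the two halves of one incremental interface: update() fills 512-bit chunks and processes each complete one, finalize() pads whatever is left and emits the digest. A minimal caller sketch, assuming a default-constructible SHA256 whose constructor zeroes length and chunkSize (not shown in the excerpts):

// Hypothetical caller; the SHA256 class itself comes from the excerpts above.
void hashBuffer(const void *msg, size_t msgLen, uint8_t out[32])
{
    SHA256 sha;               // assumed: default constructor resets the state
    sha.update(msg, msgLen);  // may be called repeatedly for streamed input
    sha.finalize(out, 32);    // pads the final chunk and writes the 32-byte digest
}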
Example #12
void Renderer::run()
{
	NBT_Debug("begin");

	al_hide_mouse_cursor(dpy_);

	al_identity_transform(&camera_transform_);

	float x = -camera_pos_.x, y = -camera_pos_.y, z = -camera_pos_.z;
	//x = -dim0_->spawnX();
	//z = -dim0_->spawnZ();

	al_translate_transform_3d(&camera_transform_, x, y, z);

	al_rotate_transform_3d(&camera_transform_, 0.0, 1.0, 0.0, DEG_TO_RAD(180));

	memset(key_state_, 0, sizeof(key_state_) * sizeof(key_state_[0]));

	al_start_timer(tmr_);

	NBT_Debug("run!");

	//al_use_shader(nullptr);

	/*ALLEGRO_TRANSFORM trans;
	al_identity_transform(&trans);
	al_orthographic_transform(&trans, 0, 0, -1, al_get_display_width(dpy_), al_get_display_height(dpy_), 1);
	al_set_projection_transform(dpy_, &trans);
	al_identity_transform(&trans);
	al_use_transform(&trans);

	if(!resManager_->getAtlas()->getSheet(0)->alBitmap())
		NBT_Debug("no sheet bitmap????");
	*/
	//al_draw_bitmap(resManager_->getAtlas()->getSheet(0)->alBitmap(), 0, 0, 0);

	//al_flip_display();
	//sleep(10);

	bool redraw = false;
	bool doUpdateLookPos = false;
	bool cleared = false;
	
	while(1)
	{
		ALLEGRO_EVENT ev;
      al_wait_for_event(queue_, &ev);

      if(ev.type == ALLEGRO_EVENT_TIMER)
		{
         redraw = true;
			//cam_.rx = 1.0;
			float x = 0.0, y = 0.0, z = 0.0;
			float translate_diff = 0.3;
			float ry = 0.0;
			float rotate_diff = 0.04;
			bool changeTranslation = false;
			bool changeRotation = false;

			if(key_state_[ALLEGRO_KEY_W])
			{
				z += translate_diff;
				changeTranslation = true;
			}

			if(key_state_[ALLEGRO_KEY_S])
			{
				z -= translate_diff;
				changeTranslation = true;
			}

			if(key_state_[ALLEGRO_KEY_A])
			{
				x += translate_diff;
				changeTranslation = true;
			}

			if(key_state_[ALLEGRO_KEY_D])
			{
				x -= translate_diff;
				changeTranslation = true;
			}

			if(key_state_[ALLEGRO_KEY_SPACE])
			{
				y -= translate_diff;
				changeTranslation = true;
			}

			if(key_state_[ALLEGRO_KEY_LSHIFT])
			{
				y += translate_diff;
				changeTranslation = true;
			}

			if(key_state_[ALLEGRO_KEY_LEFT])
			{
				ry += rotate_diff;
				changeRotation = true;
			}

			if(key_state_[ALLEGRO_KEY_RIGHT])
			{
				ry -= rotate_diff;
				changeRotation = true;
			}

			if(changeTranslation)
			{
				//camera_pos_.translate(x, y, z);
				al_translate_transform_3d(&camera_transform_, x, y, z);
				doUpdateLookPos = true;
			}

			if(changeRotation)
			{
				al_rotate_transform_3d(&camera_transform_, 0.0, 1.0, 0.0, ry);
				doUpdateLookPos = true;
			}

			if(doUpdateLookPos)
				updateLookPos();

      }
      else if(ev.type == ALLEGRO_EVENT_DISPLAY_CLOSE)
		{
			NBT_Debug("display close");
         break;
      }
      else if(ev.type == ALLEGRO_EVENT_KEY_DOWN)
		{
			//NBT_Debug("key down");
			//NBT_Debug("pos: %fx%f", -camera_transform_.m[2][0], -camera_transform_.m[2][2]);
			key_state_[ev.keyboard.keycode] = true;

			if (ev.keyboard.keycode == ALLEGRO_KEY_Q)
			{
				break;
			}
			else if(ev.keyboard.keycode == ALLEGRO_KEY_C)
			{
				NBT_Debug("CLEAR CHUNKS");
				glBindVertexArray(vao_);
				for(auto ch: chunkData_)
				{
					delete ch.second;
				}
				glBindVertexArray(0);
				chunkData_.clear();

				glDeleteVertexArrays(1, &vao_);  // vao_ is a vertex array object, not a buffer object
				
				cleared = true;
			}
			else if (ev.keyboard.keycode == ALLEGRO_KEY_ESCAPE)
			{
				grab_mouse_ = !grab_mouse_;
			}
		}
		else if(ev.type == ALLEGRO_EVENT_KEY_UP)
		{
			//NBT_Debug("pos: %fx%f", -camera_transform_.m[2][0], -camera_transform_.m[2][2]);
			key_state_[ev.keyboard.keycode] = false;
		}
		else if(ev.type == ALLEGRO_EVENT_MOUSE_BUTTON_UP)
		{
				grab_mouse_ = true;
		}
		else if(ev.type == ALLEGRO_EVENT_MOUSE_AXES && grab_mouse_)
		{
			float dx = ev.mouse.dx, dy = ev.mouse.dy;

			if(dy > 0 && dy < 1.5)
				dy = 0.0;

			if(dy < 0 && dy > -1.5)
				dy = 0.0;

			if(dx > 0 && dx < 1.5)
				dx = 0.0;

			if(dx < 0 && dx > -1.5)
				dx = 0.0;

			float ry = dx / al_get_display_width(dpy_), rx = dy / al_get_display_height(dpy_);

			rx_look += rx;
			al_rotate_transform_3d(&camera_transform_, 0.0, 1.0, 0.0, ry);
//			al_rotate_transform_3d(&camera_transform_, 1.0, 0.0, 0.0, rx);

			//cam_.rx += dy / al_get_display_height(dpy_);

			al_set_mouse_xy(dpy_, al_get_display_width(dpy_)/2.0, al_get_display_height(dpy_)/2.0);

			doUpdateLookPos = true;
		}

      if(redraw && al_is_event_queue_empty(queue_))
		{
			if(!loadChunkQueue.empty())
			{
				NBT_Debug("%i chunks to load", loadChunkQueue.size());

				std::pair<int32_t, int32_t> pos = loadChunkQueue.front();
				loadChunkQueue.pop();

				processChunk(pos.first, pos.second);
			}
			else
			{
				if(!cleared)
				{
					//NBT_Debug("pos: %fx%fx%f", camera_pos_.getX(), camera_pos_.getZ(), camera_pos_.getY());
					autoLoadChunks(camera_pos_.getX() / 16.0, camera_pos_.getZ() / 16.0);
				}
			}

			ALLEGRO_STATE state;
			al_store_state(&state, ALLEGRO_STATE_ALL);
			al_set_projection_transform(dpy_, &al_proj_transform_);

			glClear(GL_DEPTH_BUFFER_BIT);

         redraw = false;
			al_clear_to_color(al_map_rgb(255,255,255));
         draw();

			al_restore_state(&state);
			al_set_projection_transform(dpy_, &al_proj_transform_);

			drawHud();

			al_restore_state(&state);
         al_flip_display();
      }


	}

	NBT_Debug("stop timer");
	al_stop_timer(tmr_);

	NBT_Debug("end");


	NBT_Debug("sizeof GL_FLOAT: %i", sizeof(GLfloat));
}
Example #13
unsigned int searchProject(Output* output_, const char* file, const char* string, unsigned int options, unsigned int limit, const char* include, const char* exclude)
{
	SearchOutput output(output_, options, limit);
	std::unique_ptr<Regex> regex(createRegex(string, getRegexOptions(options)));
	std::unique_ptr<Regex> includeRe(include ? createRegex(include, RO_IGNORECASE) : 0);
	std::unique_ptr<Regex> excludeRe(exclude ? createRegex(exclude, RO_IGNORECASE) : 0);
	NgramRegex ngregex((options & SO_BRUTEFORCE) ? nullptr : regex.get());
	
	std::string dataPath = replaceExtension(file, ".qgd");
	FileStream in(dataPath.c_str(), "rb");
	if (!in)
	{
		output_->error("Error reading data file %s\n", dataPath.c_str());
		return 0;
	}
	
	DataFileHeader header;
	if (!read(in, header) || memcmp(header.magic, kDataFileHeaderMagic, strlen(kDataFileHeaderMagic)) != 0)
	{
		output_->error("Error reading data file %s: malformed header\n", dataPath.c_str());
		return 0;
	}

	{
		unsigned int chunkIndex = 0;

		// Assume 50% compression ratio (it's usually much better)
		BlockPool chunkPool(kChunkSize * 3 / 2);

		std::vector<unsigned char> index;
		DataChunkHeader chunk;

		WorkQueue queue(WorkQueue::getIdealWorkerCount(), kMaxQueuedChunkData);

		while (!output.isLimitReached() && read(in, chunk))
		{
			if (ngregex.empty() || chunk.indexSize == 0)
			{
				in.skip(chunk.indexSize);
			}
			else
			{
				try
				{
					index.resize(chunk.indexSize);
				}
				catch (const std::bad_alloc&)
				{
					output_->error("Error reading data file %s: malformed chunk\n", dataPath.c_str());
					return 0;
				}

				if (chunk.indexSize && !read(in, &index[0], chunk.indexSize))
				{
					output_->error("Error reading data file %s: malformed chunk\n", dataPath.c_str());
					return 0;
				}

				if (!ngregex.match(index, chunk.indexHashIterations))
				{
					in.skip(chunk.compressedSize);
					continue;
				}
			}

			std::shared_ptr<char> data = chunkPool.allocate(chunk.compressedSize + chunk.uncompressedSize, std::nothrow);

			if (!data || !read(in, data.get(), chunk.compressedSize))
			{
				output_->error("Error reading data file %s: malformed chunk\n", dataPath.c_str());
				return 0;
			}

			queue.push([=, &regex, &output, &includeRe, &excludeRe]() {
				char* compressed = data.get();
				char* uncompressed = data.get() + chunk.compressedSize;

				decompress(uncompressed, chunk.uncompressedSize, compressed, chunk.compressedSize);
				processChunk(regex.get(), &output, chunkIndex, uncompressed, chunk.fileCount, includeRe.get(), excludeRe.get());
			}, chunk.compressedSize + chunk.uncompressedSize);

			chunkIndex++;
		}
	}

	return output.output.getLineCount();
}