Example 1
    /// Enqueues a command to copy data from \p src_buffer to
    /// \p dst_buffer.
    ///
    /// \see_opencl_ref{clEnqueueCopyBuffer}
    ///
    /// \see copy()
    event enqueue_copy_buffer(const buffer &src_buffer,
                              const buffer &dst_buffer,
                              size_t src_offset,
                              size_t dst_offset,
                              size_t size,
                              const wait_list &events = wait_list())
    {
        BOOST_ASSERT(m_queue != 0);
        BOOST_ASSERT(src_offset + size <= src_buffer.size());
        BOOST_ASSERT(dst_offset + size <= dst_buffer.size());
        BOOST_ASSERT(src_buffer.get_context() == this->get_context());
        BOOST_ASSERT(dst_buffer.get_context() == this->get_context());

        event event_;

        cl_int ret = clEnqueueCopyBuffer(
            m_queue,
            src_buffer.get(),
            dst_buffer.get(),
            src_offset,
            dst_offset,
            size,
            events.size(),
            events.get_event_ptr(),
            &event_.get()
        );

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }

        return event_;
    }
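A minimal usage sketch for the member above, assuming the surrounding class is Boost.Compute's command_queue; the device selection, element type, and sizes are illustrative.

#include <vector>
#include <boost/compute/core.hpp>

namespace compute = boost::compute;

int main()
{
    compute::device gpu = compute::system::default_device();
    compute::context ctx(gpu);
    compute::command_queue queue(ctx, gpu);

    std::vector<float> host(256, 1.0f);
    compute::buffer src(ctx, host.size() * sizeof(float));
    compute::buffer dst(ctx, src.size());

    // upload the host data, then copy the whole of src into dst on the device
    queue.enqueue_write_buffer(src, 0, src.size(), host.data());
    compute::event e = queue.enqueue_copy_buffer(src, dst, 0, 0, src.size());
    e.wait(); // block until the copy has finished
}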
Example 2
 void
 checkInflate(buffer const& input, buffer const& original)
 {
     // split the input at every possible position to exercise streaming inflate
     for(std::size_t i = 0; i < input.size(); ++i)
     {
         buffer output(original.size());
         inflate_stream zs;
         zs.avail_in = 0;
         zs.next_in = 0;
         zs.next_out = output.data();
         zs.avail_out = output.capacity();
         if(i > 0)
         {
             zs.next_in = (Byte*)input.data();
             zs.avail_in = i;
             auto result = zs.write(Z_FULL_FLUSH);
             expect(result == Z_OK);
         }
         zs.next_in = (Byte*)input.data() + i;
         zs.avail_in = input.size() - i;
         auto result = zs.write(Z_FULL_FLUSH);
         output.resize(output.capacity() - zs.avail_out);
         expect(result == Z_OK);
         expect(output.size() == original.size());
         expect(std::memcmp(
             output.data(), original.data(), original.size()) == 0);
     }
 }
Example 3
	buffer(buffer const& b)
		: m_begin(0)
		, m_end(0)
		, m_last(0)
	{
		if (b.size() == 0) return;
		resize(b.size());
		std::memcpy(m_begin, b.begin(), b.size());
	}
Example 4
	buffer(buffer const& b)
		: m_begin(0)
		, m_size(0)
		, m_capacity(0)
	{
		if (b.size() == 0) return;
		resize(b.size());
		std::memcpy(m_begin, b.begin(), b.size());
	}
Example 5
/* Return true if there is a j s.t. ssinfos[j] is not marked as a subsingleton
   and argument j depends on argument i */
static bool has_nonsubsingleton_fwd_dep(unsigned i, buffer<param_info> const & pinfos, buffer<ss_param_info> const & ssinfos) {
    lean_assert(pinfos.size() == ssinfos.size());
    for (unsigned j = i+1; j < pinfos.size(); j++) {
        if (ssinfos[j].is_subsingleton())
            continue;
        auto const & back_deps = pinfos[j].get_back_deps();
        if (std::find(back_deps.begin(), back_deps.end(), i) != back_deps.end()) {
            return true;
        }
    }
    return false;
}
Example 6
// Convert a buffer of unsigned chars into its hexadecimal string representation
std::string cpl::crypt::blowfish::char2Hex(const buffer &charStr)
{
  std::string hexStr(charStr.size()*2, '\0');
  std::string hex;

  for(std::size_t i = 0; i < charStr.size(); i++)
  {
    hex = convertChar2Hex(charStr[i]);
    std::copy(hex.begin(), hex.end(), hexStr.begin() + (i*2));
  }

  return hexStr;
}
Example 7
    /// Enqueues a command to fill \p buffer with \p pattern.
    ///
    /// \see_opencl_ref{clEnqueueFillBuffer}
    ///
    /// \opencl_version_warning{1,2}
    ///
    /// \see fill()
    event enqueue_fill_buffer(const buffer &buffer,
                              const void *pattern,
                              size_t pattern_size,
                              size_t offset,
                              size_t size,
                              const wait_list &events = wait_list())
    {
        BOOST_ASSERT(m_queue != 0);
        BOOST_ASSERT(offset + size <= buffer.size());
        BOOST_ASSERT(buffer.get_context() == this->get_context());

        event event_;

        cl_int ret = clEnqueueFillBuffer(
            m_queue,
            buffer.get(),
            pattern,
            pattern_size,
            offset,
            size,
            events.size(),
            events.get_event_ptr(),
            &event_.get()
        );

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }

        return event_;
    }
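A short sketch of how the fill member above might be used, assuming a buffer of floats whose size is a multiple of the 4-byte pattern (as OpenCL requires) and an OpenCL 1.2 device; the helper name is illustrative.

#include <boost/compute/core.hpp>

inline void zero_fill(boost::compute::command_queue &queue,
                      const boost::compute::buffer &buf)
{
    const float zero = 0.0f;
    // repeat the 4-byte pattern across the whole buffer and wait for completion
    queue.enqueue_fill_buffer(buf, &zero, sizeof(zero), 0, buf.size()).wait();
}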
Example 8
    /// Enqueues a command to write data from host memory to \p buffer.
    /// The copy is performed asynchronously.
    ///
    /// \see_opencl_ref{clEnqueueWriteBuffer}
    ///
    /// \see copy_async()
    event enqueue_write_buffer_async(const buffer &buffer,
                                     size_t offset,
                                     size_t size,
                                     const void *host_ptr,
                                     const wait_list &events = wait_list())
    {
        BOOST_ASSERT(m_queue != 0);
        BOOST_ASSERT(size <= buffer.size());
        BOOST_ASSERT(buffer.get_context() == this->get_context());
        BOOST_ASSERT(host_ptr != 0);

        event event_;

        cl_int ret = clEnqueueWriteBuffer(
            m_queue,
            buffer.get(),
            CL_FALSE,
            offset,
            size,
            host_ptr,
            events.size(),
            events.get_event_ptr(),
            &event_.get()
        );

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }

        return event_;
    }
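A sketch of the asynchronous write above (helper name and element type are illustrative): since the copy runs asynchronously, the host storage must stay valid and unmodified until the returned event completes.

#include <vector>
#include <boost/compute/core.hpp>

inline void upload(boost::compute::command_queue &queue,
                   const boost::compute::buffer &buf,
                   const std::vector<int> &host)
{
    boost::compute::event e =
        queue.enqueue_write_buffer_async(buf, 0, host.size() * sizeof(int), host.data());
    e.wait(); // `host` must not be destroyed or modified before this point
}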
Example 9
    /// Enqueues a command to read data from \p buffer to host memory.
    ///
    /// \see_opencl_ref{clEnqueueReadBuffer}
    ///
    /// \see copy()
    void enqueue_read_buffer(const buffer &buffer,
                             size_t offset,
                             size_t size,
                             void *host_ptr,
                             const wait_list &events = wait_list())
    {
        BOOST_ASSERT(m_queue != 0);
        BOOST_ASSERT(size <= buffer.size());
        BOOST_ASSERT(buffer.get_context() == this->get_context());
        BOOST_ASSERT(host_ptr != 0);

        cl_int ret = clEnqueueReadBuffer(
            m_queue,
            buffer.get(),
            CL_TRUE,
            offset,
            size,
            host_ptr,
            events.size(),
            events.get_event_ptr(),
            0
        );

        if(ret != CL_SUCCESS){
            BOOST_THROW_EXCEPTION(opencl_error(ret));
        }
    }
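A sketch of a blocking read using the member above (helper name is illustrative): the call returns only after the data has been copied into host memory.

#include <vector>
#include <boost/compute/core.hpp>

inline std::vector<unsigned char> download(boost::compute::command_queue &queue,
                                           const boost::compute::buffer &buf)
{
    std::vector<unsigned char> host(buf.size());
    queue.enqueue_read_buffer(buf, 0, host.size(), host.data()); // blocks until complete
    return host;
}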
Example 10
list<unsigned> fun_info_manager::collect_deps(expr const & type, buffer<expr> const & locals) {
    buffer<unsigned> deps;
    for_each(type, [&](expr const & e, unsigned) {
            if (m_ctx.is_tmp_local(e)) {
                unsigned idx;
                for (idx = 0; idx < locals.size(); idx++)
                    if (locals[idx] == e)
                        break;
                if (idx < locals.size() && std::find(deps.begin(), deps.end(), idx) == deps.end())
                    deps.push_back(idx);
            }
            return has_local(e); // continue the search only if e has locals
        });
    std::sort(deps.begin(), deps.end());
    return to_list(deps);
}
Example 11
 expr visit_projection(name const & fn, buffer<expr> const & args) {
     projection_info const & info = *get_projection_info(env(), fn);
     expr major = visit(args[info.m_nparams]);
     buffer<bool> rel_fields;
     name I_name = *inductive::is_intro_rule(env(), info.m_constructor);
     get_constructor_info(info.m_constructor, rel_fields);
     lean_assert(info.m_i < rel_fields.size());
     lean_assert(rel_fields[info.m_i]); /* We already erased irrelevant information */
     /* Adjust projection index by ignoring irrelevant fields */
     unsigned j = 0;
     for (unsigned i = 0; i < info.m_i; i++) {
         if (rel_fields[i])
             j++;
     }
     expr r;
     if (has_trivial_structure(I_name, rel_fields)) {
         lean_assert(j == 0);
         r = major;
     } else {
         r = mk_app(mk_proj(j), major);
     }
     /* Add additional arguments */
     for (unsigned i = info.m_nparams + 1; i < args.size(); i++)
         r = mk_app(r, visit(args[i]));
     return r;
 }
Example 12
/**
 * Received message callback. This function is executed to process
 * received messages. If the message is valid, it is dispatched to the
 * user-defined handler.
 *
 * @param buff Message byte buffer.
 * @param rbytes Size of the message.
 * @param ec Error code.
 */
void link::recv_handler(buffer<uint8>& buff, size_t rbytes,
						const boost::system::error_code& ec)
{
	if (ec) {
		mih::message pm;

		_handler(pm, ec);

	} else {
		mih::frame* fm = mih::frame::cast(buff.get(), rbytes);

		if (fm) {
			mih::message pm(*fm);

			_handler(pm, ec);
		}
	}

	void* rbuff = buff.get();
	size_t rlen = buff.size();

	_sock.async_receive(boost::asio::buffer(rbuff, rlen),
						boost::bind(&link::recv_handler,
									this,
									bind_rv(buff),
									boost::asio::placeholders::bytes_transferred,
									boost::asio::placeholders::error));
}
Example 13
static expr parse_notation_expr(parser & p, buffer<expr> const & locals) {
    auto pos = p.pos();
    expr r = p.parse_expr();
    r = abstract(r, locals.size(), locals.data());
    check_notation_expr(r, pos);
    return r;
}
Example 14
inline void device_buf::out(byte p)
{
	// TODO: If out_buf was resized so that it would be full at the next flush moment,
	// we only need to do one check here.
	out_buf.put(p);
	if(out_buf.size() > out_buffer_size)
		flush(); // Time to flush
}
Example 15
void cpl::crypt::blowfish::init(const buffer &ucKey) {

  short keysize = ucKey.size();

  if (keysize < 1)
    throw std::runtime_error("Incorrect key length");
  // Check the key - the key length should be between 1 and 56 bytes
  if (keysize > 56)
    keysize = 56;

  buffer aucLocalKey = ucKey;

  // Reflexive initialization of the Blowfish cipher:
  // generate the subkeys from the key by first flooding the P and S boxes with digits of pi
  std::memcpy(m_auiP, scm_auiInitP, sizeof m_auiP);
  std::memcpy(m_auiS, scm_auiInitS, sizeof m_auiS);

  // Load the P boxes with key bytes, repeatedly cycling through the key
  // until the entire P array has been XORed with key bits
  unsigned int x = 0;
  int iCount = 0;

  for (int i = 0; i < 18; i++)
  {
    x = 0;
    for (int n = 4; n--; )
    {
      x <<= 8;
      x |= aucLocalKey[iCount];

      iCount++;
      if (iCount == keysize)
      { // all key bytes used, so recycle
        iCount = 0;
      }
    }
    m_auiP[i] ^= x;
  }

  // Reflect the P and S boxes through the evolving Blowfish state
  block b(0UL, 0UL); // all-zero block
  for (int i = 0; i < 18; )
  {
    encrypt(b);
    m_auiP[i++] = b.m_uil;
    m_auiP[i++] = b.m_uir;
  }

  for (int j = 0; j < 4; j++)
    for (int k = 0; k < 256; )
    {
      encrypt(b);
      m_auiS[j][k++] = b.m_uil;
      m_auiS[j][k++] = b.m_uir;
    }
}
Example 16
character A_upper_calltype A_upper_input(bool writeback, character c, buffer& ib) {
	if (writeback) {
		ib.push_front(c); 
		return 0; 
	} else {
		if (ib.size() > 0) return ib.pop_front(); 
		return (character)fgetc(stdin); 
	}
}
Example 17
stream::state_t
socks5_proxy::send( buffer &in_buf, buffer &out_buf )
{
	if ( m_state == stream::state_t::handshaking )
	{
		if ( !m_sent_greeting )
		{
			if ( proxy::manager::shared().authorization().size() > 0 )
			{
				out_buf.size( 4 );
			
				out_buf.put( 0, 0x05 );
				out_buf.put( 1, 0x02 );
				out_buf.put( 2, 0x00 );
				out_buf.put( 3, 0x02 );
			}
			else
			{
				out_buf.size( 3 );
				
				out_buf.put( 0, 0x05 );
				out_buf.put( 1, 0x01 );
				out_buf.put( 2, 0x00 );
			}
			
			m_sent_greeting = true;
		}
		
		m_send_queue.append( in_buf );
		
		m_state5 = waiting_for_opening_response;
		m_state = stream::state_t::connected;
	}
	else if ( m_state5 != connected )
	{
		m_send_queue.append( in_buf );
	}
	else
	{
		out_buf = std::move( in_buf );
	}
	
	return m_state;
}
Example 18
 /* Collect (and sort) dependencies of collected parameters */
 void collect_and_normalize_dependencies(buffer<expr> & norm_params) {
     name_map<expr> new_types;
     for (unsigned i = 0; i < m_params.size(); i++) {
         expr x = m_params[i];
         expr new_type = collect(m_ctx.instantiate_mvars(m_ctx.infer(x)));
         new_types.insert(mlocal_name(x), new_type);
     }
     local_context const & lctx = m_ctx.lctx();
     std::sort(m_params.begin(), m_params.end(), [&](expr const & l1, expr const & l2) {
             return lctx.get_local_decl(l1)->get_idx() < lctx.get_local_decl(l2)->get_idx();
         });
     for (unsigned i = 0; i < m_params.size(); i++) {
         expr x         = m_params[i];
         expr type      = *new_types.find(mlocal_name(x));
         expr new_type  = replace_locals(type, i, m_params.data(), norm_params.data());
         expr new_param = m_ctx.push_local(local_pp_name(x), new_type, local_info(x));
         norm_params.push_back(new_param);
     }
 }
Example 19
std::string comma_list(const buffer& buf)
{
	std::string str;
	if (buf.size() == 0)
		str = "";
	else if (buf.size() == 1)
		str = buf.front();
	else if (buf.size() == 2)
		str = buf.front() + " and " + buf.back();
	else
	{
		// note: elements are emitted in reverse iteration order ("c, b, and a" for {a, b, c})
		buffer::const_iterator it = buf.begin();
		str = "and " + *(it++);

		for (;it != buf.end(); it++)
			str = *it + ", " + str;
	}
	return str;
}
Example 20
    expr visit_cases_on(name const & fn, buffer<expr> & args) {
        name const & I_name = fn.get_prefix();
        if (is_inductive_predicate(env(), I_name))
            throw exception(sstream() << "code generation failed, inductive predicate '" << I_name << "' is not supported");
        bool is_builtin = is_vm_builtin_function(fn);
        buffer<name> cnames;
        get_intro_rule_names(env(), I_name, cnames);
        lean_assert(args.size() >= cnames.size() + 1);
        if (args.size() > cnames.size() + 1)
            distribute_extra_args_over_minors(I_name, cnames, args);
        lean_assert(args.size() == cnames.size() + 1);
        /* Process major premise */
        args[0] = visit(args[0]);
        unsigned num_reachable = 0;
        optional<expr> reachable_case;
        /* Process minor premises */
        for (unsigned i = 0; i < cnames.size(); i++) {
            buffer<bool> rel_fields;
            get_constructor_info(cnames[i], rel_fields);
            auto p = visit_minor_premise(args[i+1], rel_fields);
            expr new_minor = p.first;
            if (i == 0 && has_trivial_structure(I_name, rel_fields)) {
                /* Optimization for an inductive datatype that has a single constructor with only one relevant field */
                return beta_reduce(mk_app(new_minor, args[0]));
            }
            args[i+1] = new_minor;
            if (!p.second) {
                num_reachable++;
                reachable_case = p.first;
            }
        }

        if (num_reachable == 0) {
            return mk_unreachable_expr();
        } else if (num_reachable == 1 && !is_builtin) {
            /* Use _cases.1 */
            return mk_app(mk_cases(1), args[0], *reachable_case);
        } else if (is_builtin) {
            return mk_app(mk_constant(fn), args);
        } else {
            return mk_app(mk_cases(cnames.size()), args);
        }
    }
Example 21
static void trace_if_unsupported(type_context & ctx, expr const & fn,
                                 buffer<expr> const & args, unsigned prefix_sz, ss_param_infos const & result) {
    lean_assert(args.size() >= length(result));
    if (!is_fun_info_trace_enabled())
        return;
    fun_info info = get_fun_info(ctx, fn, args.size());
    buffer<param_info> pinfos;
    to_buffer(info.get_params_info(), pinfos);
    buffer<ss_param_info> ssinfos;
    to_buffer(get_subsingleton_info(ctx, fn, args.size()), ssinfos);
    lean_assert(pinfos.size() == ssinfos.size());
    /* Check if all remaining arguments are nondependent or
       dependent (but all forward dependencies are subsingletons) */
    unsigned i = prefix_sz;
    for (; i < pinfos.size(); i++) {
        param_info const & pinfo = pinfos[i];
        if (!pinfo.has_fwd_deps())
            continue; /* nondependent argument */
        if (has_nonsubsingleton_fwd_dep(i, pinfos, ssinfos))
            break; /* the i-th argument has a forward dependency that is neither a prop nor a subsingleton */
    }
    if (i == pinfos.size())
        return; // this is the *cheap* case

    /* Expensive case */
    /* We generate a trace message IF it would be possible to compute more precise information.
       That is, there is an argument that is a proposition and/or subsingleton, but
       the corresponding pinfo is not marked as a prop/subsingleton.
    */
    i = 0;
    for (ss_param_info const & ssinfo : result) {
        if (!ssinfo.is_subsingleton()) {
            expr arg_type = ctx.infer(args[i]);
            if (ctx.mk_subsingleton_instance(arg_type)) {
                lean_trace_fun_info(
                    tout() << "approximating function information for '" << fn
                    << "', this may affect the effectiveness of the simplifier and congruence closure modules, "
                    << "more precise information can be efficiently computed if all parameters are moved to the "
                    << "beginning of the function\n";);
                return;
            }
        }
        i++;
    }
}
Example 22
    //==========================================================================
    buffer( buffer const& src ) : parent_data(src.allocator())
    {
      parent_data::allocate(src.size());
#if BOOST_WORKAROUND(BOOST_MSVC, >= 1400) && BOOST_WORKAROUND(BOOST_MSVC, < 1600)
      stdext::unchecked_copy(src.begin(),src.end(),begin());
#elif BOOST_WORKAROUND(BOOST_MSVC, > 1500)
      std::copy(src.begin(),src.end(),stdext::make_unchecked_array_iterator(begin()));
#else
      std::copy(src.begin(),src.end(),begin());
#endif
    }
Example 23
/* Return true if there is a j s.t. pinfos[j] is not a
   proposition/subsingleton and argument j depends on argument i */
static bool has_nonprop_nonsubsingleton_fwd_dep(unsigned i, buffer<param_info> const & pinfos) {
    for (unsigned j = i+1; j < pinfos.size(); j++) {
        param_info const & fwd_pinfo = pinfos[j];
        if (fwd_pinfo.is_prop() || fwd_pinfo.is_subsingleton())
            continue;
        auto const & fwd_deps        = fwd_pinfo.get_dependencies();
        if (std::find(fwd_deps.begin(), fwd_deps.end(), i) != fwd_deps.end()) {
            return true;
        }
    }
    return false;
}
Example 24
/**
 * Handle the reception of an asynchronous message.
 *
 * @param buff The input message bytes.
 * @param rbytes The number of bytes of the input message.
 * @param error The error code.
 */
void udp_listener::handle_receive(buffer<uint8> &buff,
								  size_t rbytes,
								  const boost::system::error_code &error)
{
	using namespace boost;

	if (!error) {
		ODTONE_LOG(1, "(udp) received ", rbytes, " bytes.");
		ODTONE_LOG(0, "(udp) from ", _rmt_endp.address().to_string(),
		    " : ", _rmt_endp.port());

		mih::frame *pud = mih::frame::cast(buff.get(), rbytes);

		if(pud) {
			// Decode IP address
			mih::octet_string ip;
			uint16 scope = 0;
			if(_rmt_endp.address().is_v4()) {
				boost::asio::ip::address_v4 ip_addr = _rmt_endp.address().to_v4();
				ip = ip_addr.to_string();
			} else if(_rmt_endp.address().is_v6()) {
				boost::asio::ip::address_v6 ip_addr = _rmt_endp.address().to_v6();
				scope = ip_addr.scope_id();
				ip_addr.scope_id(0);
				ip = ip_addr.to_string();
			}
			// Decode port
			uint16 port = _rmt_endp.port();

			meta_message_ptr in(new meta_message(ip, scope, port, *pud));
			ODTONE_LOG(4, *pud);

			// discard messages if multicast messages are not supported
			if(utils::is_multicast(in) && !_enable_multicast) {
				ODTONE_LOG(1, "(udp) Discarding message! Reason: ",
							  "multicast messages are not supported");
			} else {
				_dispatch(in);
			}
		}
	}

	void *rbuff = buff.get();
	size_t rlen = buff.size();

	_sock.async_receive_from(asio::buffer(rbuff, rlen),
				 _rmt_endp,
				 bind(&udp_listener::handle_receive,
				      this,
				      bind_rv(buff),
				      asio::placeholders::bytes_transferred,
				      asio::placeholders::error));
}
Example 25
constraint_index lar_solver::add_constraint(const buffer<std::pair<mpq, var_index>>& left_side, lconstraint_kind kind_par, mpq right_side_par) {
    lean_assert(left_side.size() > 0);
    constraint_index i = m_available_constr_index++;
    lean_assert(m_normalized_constraints.find(i) == m_normalized_constraints.end());
    lar_constraint original_constr(left_side, kind_par, right_side_par, i);
    canonic_left_side * ls = create_or_fetch_existing_left_side(left_side);
    mpq ratio = find_ratio_of_original_constraint_to_normalized(ls, original_constr);
    auto kind = ratio.is_neg()? flip_kind(kind_par): kind_par;
    mpq right_side = right_side_par / ratio;
    lar_normalized_constraint normalized_constraint(ls, ratio, kind, right_side, original_constr);
    m_normalized_constraints[i] = normalized_constraint;
    return i;
}
Example 26
/**
    \brief Given a sequence metas: <tt>(?m_1 ...) (?m_2 ... ) ... (?m_k ...)</tt>,
    we say ?m_i is "redundant" if it occurs in the type of some ?m_j.
    This procedure removes from metas any redundant element.
*/
static void remove_redundant_metas(buffer<expr> & metas) {
    buffer<expr> mvars;
    for (expr const & m : metas)
        mvars.push_back(get_app_fn(m));
    unsigned k = 0;
    for (unsigned i = 0; i < metas.size(); i++) {
        bool found = false;
        for (unsigned j = 0; j < metas.size(); j++) {
            if (j != i) {
                if (occurs(mvars[i], mlocal_type(mvars[j]))) {
                    found = true;
                    break;
                }
            }
        }
        if (!found) {
            metas[k] = metas[i];
            k++;
        }
    }
    metas.shrink(k);
}
Example 27
template<class T>
inline event write_single_value(const T &value,
                                const buffer &buffer,
                                size_t index,
                                command_queue &queue)
{
    BOOST_ASSERT(index < buffer.size() / sizeof(T));
    BOOST_ASSERT(buffer.get_context() == queue.get_context());

    return queue.enqueue_write_buffer(buffer,
                                      index * sizeof(T),
                                      sizeof(T),
                                      &value);
}
Example 28
void parse_table::for_each(buffer<transition> & ts,
                           std::function<void(unsigned, transition const *,
                                              list<accepting> const &)> const & fn) const {
    if (!is_nil(m_ptr->m_accept))
        fn(ts.size(), ts.data(), m_ptr->m_accept);
    m_ptr->m_children.for_each([&](name const & k, list<pair<action, parse_table>> const & lst) {
            for (auto const & p : lst) {
                ts.push_back(transition(k, p.first));
                p.second.for_each(ts, fn);
                ts.pop_back();
            }
        });
}
Example 29
template<class T>
inline T read_single_value(const buffer &buffer,
                           size_t index,
                           command_queue &queue)
{
    BOOST_ASSERT(index < buffer.size() / sizeof(T));
    BOOST_ASSERT(buffer.get_context() == queue.get_context());

    T value;
    queue.enqueue_read_buffer(buffer,
                              sizeof(T) * index,
                              sizeof(T),
                              &value);
    return value;
}
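A minimal usage sketch for the two helpers above (in Boost.Compute they are implementation details; the unqualified calls below assume they are visible in the current namespace): store one element of a device buffer and read it back.

#include <cstddef>
#include <boost/compute/core.hpp>

inline float roundtrip_element(const boost::compute::buffer &buf,
                               std::size_t index,
                               boost::compute::command_queue &queue)
{
    write_single_value<float>(3.5f, buf, index, queue); // store one float on the device
    return read_single_value<float>(buf, index, queue); // read the same element back
}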
Example 30
bool BufferToArchive(MPQHANDLE &hMpq, const buffer &buf, const std::string &mpqFilePath)
{
    if ( hMpq == nullptr )
        CHKD_ERR("NULL MPQ file specified for writing buffer");
    else
    {
        DWORD dataSize = (DWORD)buf.size();
        LPVOID dataPointer = (LPVOID)buf.getPtr(0);
        if ( MpqAddFileFromBuffer(hMpq, dataPointer, dataSize, mpqFilePath.c_str(), MAFA_COMPRESS | MAFA_REPLACE_EXISTING) == TRUE )
            return true;
        else
            CHKD_ERR("Failed to add buffered file to archive");
    }
    return false;
}