// Destructor: on the master rank, open the collated output file and write a
// hand-built FoamFile header; then all ranks contribute their buffered
// characters as blocks via decomposedBlockData::writeBlocks.
Foam::masterCollatingOFstream::~masterCollatingOFstream()
{
    autoPtr<OSstream> osPtr;
    if (UPstream::master())
    {
        // Note: always write binary. These are strings so readable
        // anyway. They have already be tokenised on the sending side.
        osPtr.reset
        (
            new OFstream
            (
                pathName_,
                IOstream::BINARY,
                version(),
                IOstream::UNCOMPRESSED //compression_
            )
        );

        //writeHeader(osPtr());
        // We don't have IOobject so cannot use writeHeader
        // Emit the FoamFile header manually instead.
        OSstream& os = osPtr();
        IOobject::writeBanner(os)
            << "FoamFile\n{\n"
            << " version " << version() << ";\n"
            << " format " << os.format() << ";\n"
            << " class " << decomposedBlockData::typeName << ";\n"
            << " location " << pathName_ << ";\n"
            << " object " << pathName_.name() << ";\n"
            << "}" << nl;
        IOobject::writeDivider(os) << nl;
    }

    // Every rank (master included) hands its accumulated buffer to the
    // collated block writer; `slice` is a non-owning view of the buffer.
    string s(str());
    UList<char> slice(const_cast<char*>(s.data()), label(s.size()));
    List<std::streamoff> start;
    decomposedBlockData::writeBlocks(osPtr, start, slice, commsType_);

    if (osPtr.valid() && !osPtr().good())
    {
        FatalIOErrorInFunction(osPtr())
            << "Failed writing to " << pathName_ << exit(FatalIOError);
    }
}
// extracts rules of the form Target <= {(Src_1,Dt_1),..,(Src_n,Dt_n)} // for deltaTimes Dt_1 < .. < Dt_n void Correlator::extract_rules(JacobianRules& rules, uint64_t episode_size) { uint64_t num_calls = episode_size - SLICE_SIZE; rules.reserve(num_calls); JacobianSlice slice(SLICE_SIZE, LSTMState(32)); for (int64_t t = num_calls - 1; t >= 0; --t) { // perform sensitivity analysis corcor.getJacobian(t, t + SLICE_SIZE, slice); // the last one is the target JacobianRule rule; rule.target = findBestMatch(slice.back().begin(), rule.confidence); if (rule.confidence < OBJECT_THR) { continue; } // the later objects occurred closer to the target, so iterate in reverse JacobianSlice::reverse_iterator it = ++slice.rbegin(); JacobianSlice::reverse_iterator end = slice.rend(); for (uint16_t deltaTime = 1; it != end; ++it, ++deltaTime) { double match; r_code::Code* source = findBestMatch(it->begin(), match); // no confidence, no correlation if (match >= OBJECT_THR) { Source src; src.source = source; src.deltaTime = deltaTime; rule.sources.push_back(src); rule.confidence += match; } } if (!rule.sources.empty()) { rule.confidence /= rule.sources.size() + 1; if (rule.confidence >= RULE_THR) { rules.push_back(rule); } } } }
// Atomically read the current value of `key` into *val and overwrite it with
// `newval`, recording the write in the binlog. Returns the result of get()
// (whether the key previously existed), 0 for an empty key, -1 on commit
// failure.
int SSDBImpl::getset(const Bytes &key, std::string *val, const Bytes &newval, char log_type){
	if(key.empty()){
		log_error("empty key!");
		//return -1;
		return 0;
	}
	// Transaction scopes the get+put so the pair commits together.
	Transaction trans(binlogs);
	int found = this->get(key, val);
	std::string buf = encode_kv_key(key);
	binlogs->Put(buf, slice(newval));
	binlogs->add_log(log_type, BinlogCommand::KSET, buf);
	leveldb::Status s = binlogs->commit();
	if(!s.ok()){
		log_error("set error: %s", s.ToString().c_str());
		return -1;
	}
	return found;
}
// Upload every depth slice of dataset 0 to the GPU, converted to 8-bit.
// Returns one GpuMat per slice, in depth order.
vector<gpu::GpuMat> SimilarityAssessment::splitDatasetGPU(Handle3DDataset <imgT>dataset)
{
    DATAINFO imgInfo = dataset.getDatasetInfo(0);
    vector<gpu::GpuMat> datasetSlicesGPU;
    // Avoid repeated reallocation while pushing resDepth elements.
    datasetSlicesGPU.reserve(imgInfo.resDepth);
    // Fix: getDataset(0) is loop-invariant; fetch the plane table once
    // instead of once per iteration.
    unsigned short** d = dataset.getDataset(0);
    for( int i = 0; i < imgInfo.resDepth; i++ )
    {
        // Wrap the raw 16-bit plane without copying its pixels.
        Mat slice(imgInfo.resHeight,imgInfo.resWidth,CV_16UC1,d[i]);
        Mat plane;
        gpu::GpuMat planeGPU;
        // NOTE(review): CV_8UC3 names a 3-channel type but convertTo keeps
        // the source's channel count (only the depth changes) -- confirm
        // CV_8UC1 wasn't intended here.
        slice.convertTo(plane,CV_8UC3);
        planeGPU.upload(plane);
        datasetSlicesGPU.push_back(planeGPU);
    }
    return datasetSlicesGPU;
}
// Symbolically execute this scratch program and ask the decision procedure
// whether a satisfying execution exists. Returns true iff the solver
// reports D_SATISFIABLE.
bool scratch_programt::check_sat()
{
  fix_types();
  add_instruction(END_FUNCTION);

#ifdef DEBUG
  cout << "Checking following program for satness:" << endl;
  output(ns, "scratch", cout);
#endif

  symex.constant_propagation = constant_propagation;
  goto_symex_statet::propagationt::valuest constants;
  symex(symex_state, functions, *this);
  // Slice the SSA equation down to assertion-relevant steps before
  // handing it to the solver.
  slice(equation);
  equation.convert(checker);

  return (checker.dec_solve() == decision_proceduret::D_SATISFIABLE);
}
// Sum the proof-of-work of every stored block from slice_begin_index to the
// end of the block table, reading only the `bits` field of each header.
big_number leveldb_chain_keeper::end_slice_difficulty(size_t slice_begin_index)
{
    big_number total_work = 0;
    leveldb_iterator it(db_.block->NewIterator(leveldb::ReadOptions()));
    data_chunk raw_height = uncast_type(slice_begin_index);
    for (it->Seek(slice(raw_height)); it->Valid(); it->Next())
    {
        // bits follows version(4) + two hash digests + timestamp(4).
        constexpr size_t bits_field_offset = 4 + 2 * hash_digest_size + 4;
        BITCOIN_ASSERT(it->value().size() >= 84);
        // Deserialize only the bits field of block header.
        const char* bits_field_begin = it->value().data() + bits_field_offset;
        std::string raw_bits(bits_field_begin, 4);
        auto deserial = make_deserializer(raw_bits.begin(), raw_bits.end());
        uint32_t bits = deserial.read_4_bytes();
        // Accumulate the total work.
        total_work += block_work(bits);
    }
    return total_work;
}
/* Machine-generated ATerm rewrite function (do not hand-edit).
   Scans the argument list for a pair of equal elements; when one is found it
   recurses on a rearranged list (prefix slice + matched element + middle
   slice + tail). If no rewrite applies, builds a normal form via lf_list_6. */
ATerm lf_8_recursive(ATerm arg0)
{
  {
    ATerm tmp[7];
    FUNC_ENTRY(lf_8_recursivesym, ATmakeAppl(lf_8_recursivesym, arg0));
    (tmp[1] = arg0);
    {
      ATerm atmp001110;
      ATerm atmp00110[2];
      ATerm atmp0010;
      ATerm atmp000[2];
      (atmp000[0] = tmp[1]);
      (atmp000[1] = tmp[1]);
      /* outer scan: pick each element atmp0010 in turn */
      while (not_empty_list(tmp[1])) {
        (atmp0010 = list_head(tmp[1]));
        (tmp[1] = list_tail(tmp[1]));
        (atmp00110[0] = tmp[1]);
        (atmp00110[1] = tmp[1]);
        /* inner scan: look for a later element equal to atmp0010 */
        while (not_empty_list(tmp[1])) {
          (atmp001110 = list_head(tmp[1]));
          (tmp[1] = list_tail(tmp[1]));
          if (term_equal(atmp0010, atmp001110)) {
            (tmp[2] = lf_8_recursive(cons(make_list(atmp0010), tmp[1])));
            if (check_sym(tmp[2], lf_8_recursivesym)) {
              (tmp[3] = arg_0(tmp[2]));
              (tmp[4] = arg_0(tmp[3]));
              if (not_empty_list(tmp[4])) {
                (tmp[5] = list_head(tmp[4]));
                (tmp[4] = list_tail(tmp[4]));
                /* rebuild: prefix slice + head + middle slice + remaining tail */
                (tmp[6] = lf_8_recursive(cons(slice(atmp000[0], atmp000[1]), cons(make_list(tmp[5]), cons(slice(atmp00110[0], atmp00110[1]), tmp[4])))));
                FUNC_EXIT(tmp[6]);
              }
            }
          }
          (atmp00110[1] = list_tail(atmp00110[1]));
          (tmp[1] = atmp00110[1]);
        }
        (atmp000[1] = list_tail(atmp000[1]));
        (tmp[1] = atmp000[1]);
      }
    }
    FUNC_EXIT(make_nf1(lf_8_recursivesym, lf_list_6(arg0)));
  }
}
// Extract the boundary surface of the volume identified by b.
// Each boundary enum maps to a (parameter direction, plane index) pair which
// is forwarded to slice(direction, index); the *max boundaries select the
// last plane along the matching extent of the control-point grid.
inline beziersurface<point_t> beziervolume<point_t>::slice(boundary_t b) const
{
  if (b == umin) return slice(point_type::u, 0);
  if (b == umax) return slice(point_type::u, _points.width() - 1);
  if (b == vmin) return slice(point_type::v, 0);
  if (b == vmax) return slice(point_type::v, _points.height() - 1);
  if (b == wmin) return slice(point_type::w, 0);
  if (b == wmax) return slice(point_type::w, _points.depth() - 1);
  throw std::runtime_error("invalid boundary type");
}
// if sizes==0, then keep current shape int Data::realloc(Data::Type t, const int * sizes, int n){ Data old(*this); // REV0 if(sizes){ // new shape requested clear(); shape(sizes, n); } else{ // just changing type, leave shape unchanged // Data old(*this); // REV0 clear(); shape(old.mSizes, old.maxDim()); } if(size()){ mType = t; mStride= 1; switch(type()){ case Data::BOOL: mData = pointer(new bool[size()]); break; case Data::INT: mData = pointer(new int[size()]); break; case Data::FLOAT: mData = pointer(new float[size()]); break; case Data::DOUBLE: mData = pointer(new double[size()]); break; case Data::STRING: mData = pointer(new std::string[size()]); break; default: goto end; } acquire(mData); offset(0); // if(hasData() && isNumerical()) assignAll(0); // REV0 if(hasData() && isNumerical()){ if(old.hasData()){ assign(old); // copy over as many old elements as possible if(size() > old.size()) slice(old.size()).assignAll(0); } else{ assignAll(0); } } } end: return sizeBytes() - old.sizeBytes(); }
// Copy the first `len` bytes of this string into a new flat StringData.
// Preconditions: mutable, uniquely referenced, flat, and len within both the
// current length and capacity.
StringData* StringData::shrinkImpl(size_t len) {
  assert(!isImmutable() && !hasMultipleRefs());
  assert(isFlat());
  assert(len <= m_len);
  assert(len <= capacity());

  auto const sd = Make(len);
  auto const src = slice();
  auto const dst = sd->mutableData();
  assert(len <= src.len);
  sd->setSize(len);

  auto const mcret = memcpy(dst, src.ptr, len);
  auto const ret = static_cast<StringData*>(mcret) - 1;
  // Recalculating ret from mcret avoids a spill.
  // (memcpy returns dst, which sits directly after the header, so
  //  stepping back one StringData yields sd -- checked below.)

  assert(ret == sd);
  assert(ret->checkSane());
  return ret;
}
// Ensure capacity for at least `cap` bytes. Returns `this` when the current
// capacity already suffices, otherwise a fresh flat StringData containing a
// copy of the data. Preconditions: mutable, uniquely referenced, flat.
StringData* StringData::reserve(size_t cap) {
  assert(!isImmutable() && !hasMultipleRefs());
  assert(isFlat());

  if (cap <= capacity()) return this;

  // Over-allocate by 25% to amortize repeated reserves, clamped to MaxSize+1.
  cap = std::min(cap + cap/4, size_t(MaxSize) + 1);

  auto const sd = Make(cap);
  auto const src = slice();
  auto const dst = sd->mutableData();
  sd->setSize(src.len);

  auto const mcret = memcpy(dst, src.ptr, src.len);
  auto const ret = static_cast<StringData*>(mcret) - 1;
  // Recalculating ret from mcret avoids a spill.

  assert(ret == sd);
  assert(ret->checkSane());
  return ret;
}
// Classify this string via is_numeric_string(), returning its DataType
// (KindOfNull when not numeric) with the parsed value in lval/dval.
// A negative result is cached in the hash's MSB (when errors are allowed)
// so later calls short-circuit.
DataType StringData::isNumericWithVal(int64_t &lval, double &dval,
                                      int allow_errors, int* overflow) const {
  // Negative hash == the "not numeric" bit was set by a prior call.
  if (m_hash < 0) return KindOfNull;
  DataType ret = KindOfNull;
  auto s = slice();
  if (s.size()) {
    ret = is_numeric_string(
      s.data(),
      s.size(),
      &lval,
      &dval,
      allow_errors,
      overflow
    );
    if (ret == KindOfNull && !isProxy() && allow_errors) {
      // Cache the negative outcome for the short-circuit above.
      m_hash |= STRHASH_MSB;
    }
  }
  return ret;
}
// Remove every block from slice_begin_index to the chain tip, returning the
// removed blocks through sliced_blocks. Block headers, their hash secondary
// index, and all associated tx/spend/address entries are deleted via batched
// writes executed at the end.
bool leveldb_chain_keeper::end_slice(size_t slice_begin_index,
    block_detail_list& sliced_blocks)
{
    leveldb::WriteBatch blk_batch, blk_hash_batch;
    leveldb_transaction_batch tx_batch;
    leveldb_iterator it(db_blocks_->NewIterator(leveldb::ReadOptions()));
    data_chunk raw_depth = uncast_type(slice_begin_index);
    for (it->Seek(slice(raw_depth)); it->Valid(); it->Next())
    {
        // Stored value is a serialized protobuf block.
        std::stringstream ss;
        ss.str(it->value().ToString());
        protobuf::Block proto_block;
        proto_block.ParseFromIstream(&ss);
        // Convert protobuf block header into actual block
        block_type sliced_block;
        if (!reconstruct_block(common_, proto_block, sliced_block))
            return false;
        // Add to list of sliced blocks
        block_detail_ptr sliced_detail =
            std::make_shared<block_detail>(sliced_block);
        sliced_blocks.push_back(sliced_detail);
        // Make sure to delete hash secondary index too.
        hash_digest block_hash = hash_block_header(sliced_block);
        // Delete block header...
        blk_batch.Delete(it->key());
        // And it's secondary index.
        blk_hash_batch.Delete(slice_block_hash(block_hash));
        // Remove txs + spends + addresses too
        for (const transaction_type& block_tx: sliced_block.transactions)
            if (!clear_transaction_data(tx_batch, block_tx))
                return false;
    }
    leveldb::WriteOptions options;
    // Execute batches. NOTE(review): per-database Write() statuses are not
    // checked, so a partial failure here goes unreported -- confirm intended.
    db_blocks_->Write(options, &blk_batch);
    db_blocks_hash_->Write(options, &blk_hash_batch);
    db_txs_->Write(options, &tx_batch.tx_batch);
    db_spends_->Write(options, &tx_batch.spends_batch);
    db_address_->Write(options, &tx_batch.address_batch);
    return true;
}
// Symbolically execute this scratch program and decide satisfiability of its
// assertions. When do_slice is set, the SSA equation is sliced first; if
// slicing leaves no assertions the program is trivially unsat and we return
// false without invoking the solver.
bool scratch_programt::check_sat(bool do_slice)
{
  fix_types();
  add_instruction(END_FUNCTION);
  remove_skip(*this);
  update();

#ifdef DEBUG
  std::cout << "Checking following program for satness:\n";
  output(ns, "scratch", std::cout);
#endif

  symex.constant_propagation=constant_propagation;
  goto_symex_statet::propagationt::valuest constants;

  symex(symex_state, functions, *this);

  if(do_slice)
  {
    slice(equation);
  }

  if(equation.count_assertions()==0)
  {
    // Symex sliced away all our assertions.
#ifdef DEBUG
    std::cout << "Trivially unsat\n";
#endif
    return false;
  }

  equation.convert(*checker);

#ifdef DEBUG
  std::cout << "Finished symex, invoking decision procedure.\n";
#endif

  return (checker->dec_solve()==decision_proceduret::D_SATISFIABLE);
}
// Render a matrix as nested bracketed text. Matrices of rank <= 2 are
// delegated to toString2D(); higher-rank matrices are printed recursively,
// one hyperplane of the trailing dimension per entry.
static std::string toString(const Matrix& matrix, size_t limit)
{
    if(matrix.empty())
    {
        return "[]";
    }

    auto dimensions = matrix.size();

    if(dimensions.size() <= 2)
    {
        return toString2D(matrix, limit);
    }

    std::stringstream out;

    out << "[\n";

    size_t trailing = dimensions.back();

    for(size_t index = 0; index != trailing; ++index)
    {
        // Select the index-th hyperplane along the last dimension.
        auto begin  = zeros(dimensions);
        auto finish = dimensions;

        begin.back()  = index;
        finish.back() = index + 1;

        // Drop the now-singleton trailing dimension before recursing.
        auto reducedShape = dimensions;
        reducedShape.pop_back();

        out << toString(reshape(slice(matrix, begin, finish), reducedShape), limit);
        out << ",\n";
    }

    out << "]";

    return out.str();
}
// Look up which input spends the given output point. Returns false when no
// spend is recorded or on a database error; on success fills input_spend
// with the spending input's tx hash and index.
bool leveldb_common::fetch_spend(const output_point& spent_output,
    input_point& input_spend)
{
    data_chunk spent_key = create_spent_key(spent_output);
    std::string raw_spend;
    leveldb::Status status = db_.spend->Get(
        leveldb::ReadOptions(), slice(spent_key), &raw_spend);
    if (status.IsNotFound())
        return false;
    else if (!status.ok())
    {
        log_fatal(LOG_BLOCKCHAIN) << "fetch_spend: " << status.ToString();
        return false;
    }
    // Stored value layout: spending tx hash followed by a 4-byte input index.
    const data_chunk raw_spend_data(raw_spend.begin(), raw_spend.end());
    auto deserial = make_deserializer(
        raw_spend_data.begin(), raw_spend_data.end());
    input_spend.hash = deserial.read_hash();
    input_spend.index = deserial.read_4_bytes();
    return true;
}
/*
 * Split <src-file> into <parts> pieces under <destination path>, then
 * reassemble them from files named "<dest>Part-<i>.jpg".
 *
 * Fixes: missing argc validation (argv[3] was dereferenced blindly),
 * part-name buffer too small for i >= 10 ("Part-NN.jpg" needs more than the
 * 11 extra bytes originally allocated), and a leak of earlier allocations
 * when a later calloc failed.
 */
int main(int argc, char **argv) {
    if (argc < 4) {
        printf("Usage: %s <src-file> <destination path> <parts count>\n", argv[0]);
        return 1;
    }
    int parts = atoi(argv[3]);
    if (parts <= 0) {
        printf("Parts count must be a positive integer\n");
        return 1;
    }
    const char *srcPath = argv[1];
    const char *destPath = argv[2];

    slice(srcPath, destPath, parts);

    char **partsArr = calloc(parts, sizeof(char *));
    if (!partsArr) {
        printf("No memory to allocate");
        return 1;
    }

    int i;
    for (i = 0; i < parts; i++) {
        /* 32 extra bytes covers "Part-" + any int + ".jpg" + NUL. */
        size_t nameLen = strlen(destPath) + 32;
        partsArr[i] = calloc(nameLen, sizeof(char));
        if (!partsArr[i]) {
            printf("No memory to allocate");
            /* release everything allocated so far before bailing out */
            while (--i >= 0) free(partsArr[i]);
            free(partsArr);
            return 1;
        }
        snprintf(partsArr[i], nameLen, "%sPart-%d.jpg", destPath, i);
    }

    assemble(partsArr, destPath);

    for (i = 0; i < parts; i++) {
        free(partsArr[i]);
    }
    free(partsArr);
    return 0;
}
// Debug helper: print the refcount, storage-kind flags, length, and the raw
// bytes of the string to stdout, escaping non-printable bytes as \xNN.
void StringData::dump() const {
  StringSlice s = slice();

  printf("StringData(%d) (%s%s%s%d): [", _count,
         isLiteral() ? "literal " : "",
         isShared() ? "shared " : "",
         isStatic() ? "static " : "",
         s.len);
  for (uint32_t i = 0; i < s.len; i++) {
    char ch = s.ptr[i];
    if (isprint(ch)) {
      std::cout << ch;
    } else {
      printf("\\x%02x", ch);
    }
  }
#ifdef TAINTED
  // Taint builds also dump the associated taint metadata.
  printf("\n");
  this->getTaintDataRefConst().dump();
#endif
  printf("]\n");
}
// Try to match sub-tuples of this message against `handler`, starting at
// element `start`. At each position the longest slice is tried first; on a
// match the consumed elements are removed and extraction continues
// recursively on the remainder. Returns the reduced message, or *this when
// nothing matched.
message message::extract_impl(size_t start, message_handler handler) const {
  auto s = size();
  for (size_t i = start; i < s; ++i) {
    // n = candidate slice length, longest first.
    for (size_t n = (s - i) ; n > 0; --n) {
      auto next_slice = slice(i, n);
      auto res = handler(next_slice);
      if (res) {
        // Build the index mapping of the surviving elements: all indices,
        // minus the matched range [i, i+n).
        std::vector<size_t> mapping(s);
        std::iota(mapping.begin(), mapping.end(), size_t{0});
        auto first = mapping.begin() + static_cast<ptrdiff_t>(i);
        auto last = first + static_cast<ptrdiff_t>(n);
        mapping.erase(first, last);
        if (mapping.empty()) {
          return message{};
        }
        // Re-wrap the original values with the reduced index mapping and
        // keep extracting from position i.
        message next{detail::decorated_tuple::make(vals_, std::move(mapping))};
        return next.extract_impl(i, handler);
      }
    }
  }
  return *this;
}
// Fetch a transaction by hash. The stored value is 8 bytes of metadata
// (4-byte parent block height + 4-byte index within that block) followed by
// the serialized transaction; read_parent / read_tx select which portions
// to deserialize into tx_info. Returns false when missing or unreadable.
bool leveldb_common::get_transaction(leveldb_tx_info& tx_info,
    const hash_digest& tx_hash, bool read_parent, bool read_tx)
{
    // First we try to read the bytes from the database.
    std::string value;
    leveldb::Status status = db_.tx->Get(
        leveldb::ReadOptions(), slice(tx_hash), &value);
    if (status.IsNotFound())
        return false;
    else if (!status.ok())
    {
        log_fatal(LOG_BLOCKCHAIN) << "get_transaction("
            << tx_hash << "): " << status.ToString();
        return false;
    }
    // Read the parent block height and our index in that block (if necessary).
    BITCOIN_ASSERT(value.size() > 8);
    if (read_parent)
    {
        auto deserial = make_deserializer(value.begin(), value.begin() + 8);
        tx_info.height = deserial.read_4_bytes();
        tx_info.index = deserial.read_4_bytes();
    }
    if (!read_tx)
        return true;
    // Read the actual transaction (if necessary).
    try
    {
        BITCOIN_ASSERT(value.size() > 8);
        satoshi_load(value.begin() + 8, value.end(), tx_info.tx);
    }
    catch (end_of_stream)
    {
        // Truncated/corrupt serialization.
        return false;
    }
    BITCOIN_ASSERT(satoshi_raw_size(tx_info.tx) + 8 == value.size());
    BITCOIN_ASSERT(hash_transaction(tx_info.tx) == tx_hash);
    return true;
}
// Split a line at suitable positions to make it shorter than // maxWidth. The line should not contain embedded line breaks. static void split_line(const TextInfo& info, const utf8_string& string, coord maxWidth, text_lines_t& result) { size_t wordStart = 0; size_t wordEnd = 0; utf8_string line; do { wordEnd = string.find(chars::space, wordStart); if (wordEnd == std::string::npos){ wordEnd = string.size(); } utf8_string word = slice(string, wordStart, wordEnd); const coord width = info.GetWidth(line + chars::space + word); if (!line.empty() && width > maxWidth){ result.push_back(TextLine::SoftBreak(width, line + chars::space)); line.clear(); } if (info.GetWidth(word) > maxWidth){ word = split_word(info, word, result); } if (!line.empty()){ line += chars::space; } line += word; wordStart = wordEnd + 1; } while (wordEnd != string.size()); if (line.size() > 1){ const utf8_string last(line + chars::space); const coord width = info.GetWidth(last); result.push_back(TextLine::SoftBreak(width, last)); } }
/* virtual */
// Find all rows whose composite key (built from `where`, joined with ':')
// matches an index entry, and invoke `func` on each matching row until it
// returns false.
void idx_rbtree::select(const type_slice where[],
                        const_dataset::index::iterator& func) const {
  type_slice key;
  /*if (row.metadata() != &(table_->metadata())) {
    MLOG_WARN << this << "->idx_rbtree::select use row meta data[" << row.metadata()
        << "] do not match index meta data:" << table_->metadata() << endl;
    return;
  }*/
  // Multi-column keys compare as strings; single-column keys keep the
  // column's configured type.
  if (size_ > 1)
    key.type = buffer_metadata::STR;
  else // do not check again!!!
    key.type = table_->metadata().get_cfg_entry(*idx_keys_)->type();
  // key.data = processing_utils::encoding(idx_keys_, size_, ':', row);
  // NOTE(review): variable-length array is a compiler extension, not
  // standard C++ -- confirm the build is restricted to gcc/clang.
  slice keys_slice[size_];
  int len = processing_utils::encoding2slice(where, size_, 1, keys_slice);
  if (len < 1)
    return; // empty string is not as a index value! add by waq@2011-08-31
  // Join encoded parts with ':' into a heap buffer; freed below after the
  // equal_range lookup has finished with it.
  char* key_buff = new char[len];
  processing_utils::encoding(keys_slice, size_, key_buff, len, ':');
  key.data = slice(key_buff, len);
  // cout << "idx_rbtree::select args:" << key << endl;
  pair<slice_map_t::const_iterator, slice_map_t::const_iterator> ret =
      map_.equal_range(key);
  // MLOG_DEBUG << this << "->idx_rbtree::select key=" << key
  //     << (ret.first == ret.second ? " not " : " ") << "found!" << endl;
  delete[] key.data.data();
  const_dataset::table::rows_t::size_type rownum = 0;
  for (slice_map_t::const_iterator itr = ret.first; itr != ret.second;
       ++itr, ++rownum) {
    // MLOG_DEBUG << rownum << '>' << *(table_->get(itr->second)) << endl;
    if (!func(*(table_->get(itr->second)), rownum)) // table cant contains null
      break;
  }
#if MLOG_LEVEL < 20 && 0 // debug
  cout << this << "->idx_rbtree::select index show all:" << endl;
  slice_map_t::const_iterator itr = map_.begin(), end = map_.end();
  for (; itr != end; ++itr) {
    cout << itr->first << '=' << itr->second << endl;
  }
#endif
}
/* Machine-generated ATerm rewrite function (do not hand-edit).
   Repeatedly searches the list for a pair of equal elements; when found,
   the list is rearranged (prefix slice + matched element + middle slice +
   tail) and the scan restarts via the label, until no pair remains. */
ATerm lf_list_1(ATerm arg0)
{
  {
    ATerm tmp[1];
    FUNC_ENTRY(lf_list_1sym, ATmakeAppl(lf_list_1sym, arg0));
    {
      ATerm ltmp[1];
      lbl_lf_list_1:
      ltmp[0] = arg0;
      (tmp[0] = ltmp[0]);
      {
        ATerm atmp01110;
        ATerm atmp0110[2];
        ATerm atmp010;
        ATerm atmp00[2];
        (atmp00[0] = tmp[0]);
        (atmp00[1] = tmp[0]);
        /* outer scan: pick each element atmp010 in turn */
        while (not_empty_list(tmp[0])) {
          (atmp010 = list_head(tmp[0]));
          (tmp[0] = list_tail(tmp[0]));
          (atmp0110[0] = tmp[0]);
          (atmp0110[1] = tmp[0]);
          /* inner scan: look for a later element equal to atmp010 */
          while (not_empty_list(tmp[0])) {
            (atmp01110 = list_head(tmp[0]));
            (tmp[0] = list_tail(tmp[0]));
            if (term_equal(atmp010, atmp01110)) {
              (arg0 = cons(slice(atmp00[0], atmp00[1]), cons(make_list(atmp010), cons(slice(atmp0110[0], atmp0110[1]), tmp[0]))));
              goto lbl_lf_list_1; /* restart instead of recursing */
            }
            (atmp0110[1] = list_tail(atmp0110[1]));
            (tmp[0] = atmp0110[1]);
          }
          (atmp00[1] = list_tail(atmp00[1]));
          (tmp[0] = atmp00[1]);
        }
      }
      FUNC_EXIT(make_nf1(lf_list_1sym, ltmp[0]));
    }
  }
}
// mutations void StringData::setChar(int offset, CStrRef substring) { assert(!isStatic()); if (offset >= 0) { StringSlice s = slice(); if (s.len == 0) { // PHP will treat data as an array and we don't want to follow that. throw OffsetOutOfRangeException(); } char c = substring.empty() ? 0 : substring.data()[0]; if (uint32_t(offset) < s.len) { ((char*)s.ptr)[offset] = c; } else if (offset <= RuntimeOption::StringOffsetLimit) { uint32_t newlen = offset + 1; MutableSlice buf = isImmutable() ? escalate(newlen) : reserve(newlen); memset(buf.ptr + s.len, ' ', newlen - s.len); buf.ptr[offset] = c; setSize(newlen); } else { throw OffsetOutOfRangeException(); } m_hash = 0; // since we modified the string. } }
// Write a human-readable rendering of a variable's type expression to `os`:
// the class name resolved from slice(type, 0, ptr_size), followed by a
// "[...]" placeholder when the class has argument names. Prints
// "UndefinedType" for a null type.
static void write_var_type( Stream &os, Expr type ) {
    if ( type ) {
        int ps = arch->ptr_size;
        // First ptr_size bits of the expression identify the class.
        ClassInfo *ci = ip->class_info( slice( type, 0, ps ) );
        os << ip->glob_nstr_cor.str( ci->name );
        if ( ci->arg_names.size() ) {
            os << "[";
            // for( int i = 0; i < type.size_in_bits() / ps / 2; ++i )
            //    //os << slice( type, ( 2 * i + 1 ) * ps, ( 2 * i + 2 ) * ps ) << ";";
            //// PRINT( type.size_in_bits() / ps / 2 );
            // for( int i = 0; i < type.size_in_bits() / ps / 2; ++i ) {
            //     if ( i )
            //         os << ",";
            //     write_var( os,
            //        slice( type, ( 2 * i + 1 ) * ps, ( 2 * i + 2 ) * ps ), // type
            //        slice( type, ( 2 * i + 2 ) * ps, ( 2 * i + 3 ) * ps ), // data
            //        true );
            // }
            // TODO(review): argument rendering above is disabled; only the
            // "..." placeholder is emitted for now.
            os << "...]";
        }
    } else
        os << "UndefinedType";
}
// Transform the frequency-domain bins back into a time-domain frame,
// overlap-adding any zero-padding spill saved from the previous frame.
// When `output` is non-null the reconstructed window is copied into it.
void DFT::inverse(float * output){	//printf("DFT::inverse(float *)\n");
	switch(mSpctFormat){
	case Bin::Polar:
	case Bin::MagFreq:
		//arr::mul(bins0(), gen::val(normInverse()), nbins);
		POLAR_RECT	// convert polar bins back to rectangular form
		break;
	default:;
		//arr::mul(bins0(), gen::val(normInverse()), nbins<<1);
	}

	// arrange/scale bins for inverse xfm
	// TODO: can we avoid this move by pointer offsetting?
	mem::deepMove(mBuf+1, mBuf+2, sizeDFT()-1);
	// scale interior bins by 0.5 before the inverse transform
	slice(mBuf+1, sizeDFT()-2) *= 0.5f;

	mFFT.inverse(mBuf);

	// o.a. inverse window with prev spill
	if(sizePad() > 0){
		arr::add(mBuf, mPadOA, scl::min(sizePad(), sizeWin()));	// add prev spill

		if(sizePad() <= sizeWin()){	// no spill overlap
			mem::deepCopy(mPadOA, mBuf + sizeWin(), sizePad());	// save current spill
		}
		else{	// spill overlaps
			// add and save current spill to previous
			arr::add(mPadOA, mBuf + sizeWin(), mPadOA + sizeWin(), sizePad() - sizeWin());
			mem::deepCopy(mPadOA + sizePad() - sizeWin(), mBuf + sizePad(), sizeWin());
		}
	}

	if(output) mem::deepCopy(output, mBuf, sizeWin());
}
/*
 * Get or set +self+'s endianness
 * @overload ptr.order
 *  @return [:big, :little] endianness of +self+
 * @overload ptr.order(order)
 *  @param [Symbol] order endianness to set (+:little+, +:big+ or +:network+). +:big+ and +:network+
 *   are synonymous.
 *  @return [self]
 */
static VALUE
ptr_order(int argc, VALUE* argv, VALUE self)
{
    Pointer* ptr;

    Data_Get_Struct(self, Pointer, ptr);
    if (argc == 0) {
        /* Getter: the MEM_SWAP flag means reads/writes use the non-native
         * order, so report the swapped byte order. */
        int order = (ptr->memory.flags & MEM_SWAP) == 0 ? BYTE_ORDER : SWAPPED_ORDER;
        return order == BIG_ENDIAN ? ID2SYM(rb_intern("big")) : ID2SYM(rb_intern("little"));
    } else {
        VALUE rbOrder = Qnil;
        int order = BYTE_ORDER;

        if (rb_scan_args(argc, argv, "1", &rbOrder) < 1) {
            rb_raise(rb_eArgError, "need byte order");
        }
        if (SYMBOL_P(rbOrder)) {
            ID id = SYM2ID(rbOrder);
            if (id == rb_intern("little")) {
                order = LITTLE_ENDIAN;
            } else if (id == rb_intern("big") || id == rb_intern("network")) {
                order = BIG_ENDIAN;
            }
            /* Unrecognized symbols silently fall through to native order. */
        }
        if (order != BYTE_ORDER) {
            /* Requested order differs from native: return a swapped view
             * over the same memory (via slice) instead of mutating self. */
            Pointer* p2;
            VALUE retval = slice(self, 0, ptr->memory.size);

            Data_Get_Struct(retval, Pointer, p2);
            p2->memory.flags |= MEM_SWAP;
            return retval;
        }

        return self;
    }
}
int main(int argc, char **argv) { if (argc < 4) { die("Usage: ./prog <src-file> <destination path> <parts count>"); } size_t partsCount = atoi(argv[3]); const char *srcPath = argv[1]; const char *destPath = argv[2]; // size_t partsCount = 3; // const char *srcPath = "test.txt"; // const char *destPath = ""; slice(srcPath, destPath, partsCount); char **partNames = calloc(partsCount, sizeof(char *)); size_t i; for(i = 0; i < partsCount; i++) { partNames[i] = calloc(11 + FILE_PATH_LENGTH, sizeof(char)); sprintf(partNames[i], "%sPart-%d.%c%c%c", destPath, i+1, srcPath[strlen(srcPath)-3], srcPath[strlen(srcPath)-2], srcPath[strlen(srcPath)-1]); } assemble(partNames, destPath); return 0; }
int SSDBImpl::multi_set(const std::vector<Bytes> &kvs, int offset, char log_type){ Transaction trans(binlogs); std::vector<Bytes>::const_iterator it; it = kvs.begin() + offset; for(; it != kvs.end(); it += 2){ const Bytes &key = *it; if(key.empty()){ log_error("empty key!"); return 0; //return -1; } const Bytes &val = *(it + 1); std::string buf = encode_kv_key(key); binlogs->Put(buf, slice(val)); binlogs->add_log(log_type, BinlogCommand::KSET, buf); } leveldb::Status s = binlogs->commit(); if(!s.ok()){ log_error("multi_set error: %s", s.ToString().c_str()); return -1; } return (kvs.size() - offset)/2; }
// Index `str` for later matching: tokenize it into slices and, for each
// slice, record in the inverted index that it occurs in this target
// (with an occurrence count).
// NOTE(review): `str` is stored by address in `targets`; the caller must
// keep the string alive for the Matcher's lifetime -- confirm.
void Matcher::add(const std::string& str) {
	slices s;
	slice(s, str, true);
	// A string yielding no slices still gets one zero-length entry so it
	// is represented in the index.
	if (!s.size()) s.emplace_back(str.data(), 0);
	for (auto i = s.begin(); i != s.end(); i++) {
		// Bucket for this slice (created on first use).
		auto p = &(*um_emplace(index, *i, std::vector<Matcher::target_slice>()).first).second;
		auto sz = p->size();
		// Start a new (target, count) entry or bump the count of the
		// current target's entry (targets.size() is this target's index,
		// since it is pushed below).
		if (!sz || (*p)[sz - 1].index != targets.size())
			p->emplace_back(targets.size(), 1);
		else
			(*p)[sz - 1].count++;
	}
	targets.emplace_back(&str, s.size());
}