int main(int argc, char **argv)
{
    unsigned int output_size;
    FILE *file;

    if (argc < 4)
    {
        printf("sucreate <input dir> <xbox_su_file.bin> <output size>\n");
        return 0;
    }

    file = fopen(argv[2], "w+b");
    if (file == NULL)
    {
        printf("could not open %s for writing\n", argv[2]);
        return 1;
    }
    output_size = atoi(argv[3]);

    //    write_zero_data(file, output_size);
    write_misc_data(file);
    write_files(file, argv[1]);
    write_all_hash_blocks(file, output_size);
    // write hash of all hash blocks (master hash table)
    write_master_hash_table(file);
    // write top hash table hash
    write_hash(file, SECOND_HASH_OFFSET, SECOND_HASH_START, BLOCK_SIZE, 1);
    // write master hash
    write_hash(file, MASTER_HASH_OFFSET, HASH_DATA_START, HASH_DATA_LEN, 1);
    fclose(file);
    return 0;
}
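A typical invocation might look like sucreate ./update_files su_image.bin 4194304, where the directory name, output name and byte size are placeholders; the third argument is the target image size in bytes, parsed with atoi above.

Example #2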
// write one 24-byte hash entry for every data block in the image
void write_all_hash_blocks(FILE *f, unsigned int filesize)
{
    unsigned int i, j = 1, off, blockoff;
    unsigned int idval, block = 1;

    // first hash table: covers the first 0xAA data blocks
    off = BLOCK_HASH_START;
    for (i = 0; i < 0xAA; i++)
    {
        idval = get_id(f, block);
        write_hash(f, off + 24 * i, FILE_HEADER_OFFSET + (BLOCK_SIZE * i), BLOCK_SIZE, SWAP32(idval));
        block++;
    }

    // remaining tables: one per group of 0xAA data blocks, each placed
    // in the block immediately before its group's data
    while (1)
    {
        off = FILE_START_OFFSET + (0xAA * BLOCK_SIZE * j) + (BLOCK_SIZE * (j - 1));
        if (off > filesize)
            break;
        for (i = 0; i < 0xAA; i++)
        {
            idval = get_id(f, block);

            blockoff = FILE_START_OFFSET + (BLOCK_SIZE * ((0xAB * j) + i));
            if (blockoff > (filesize - 1))
                break;

            write_hash(f, off + 24 * i, blockoff, BLOCK_SIZE, SWAP32(idval));
            block++;
        }

        j++;
    }
}
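The layout implied by the loops above: the first table at BLOCK_HASH_START covers data blocks 0 through 0xA9, and every later group of 0xAA (170) data blocks gets its own table of 24-byte entries in the block just before the group. A minimal sketch of that offset arithmetic, assuming the same constants as the tool (the values below are placeholders, not the real ones):

/* Sketch only: placeholder values; the real BLOCK_SIZE, BLOCK_HASH_START
 * and FILE_START_OFFSET come from the tool's headers. */
#define BLOCK_SIZE        0x1000
#define BLOCK_HASH_START  0x1000
#define FILE_START_OFFSET 0x2000

/* Offset of the hash table that covers group g (0xAA data blocks each). */
unsigned int hash_table_offset(unsigned int g)
{
    if (g == 0)
        return BLOCK_HASH_START;
    return FILE_START_OFFSET + (0xAA * BLOCK_SIZE * g) + (BLOCK_SIZE * (g - 1));
}

/* Offset of the 24-byte hash entry for 0-based data block b. */
unsigned int hash_entry_offset(unsigned int b)
{
    return hash_table_offset(b / 0xAA) + 24 * (b % 0xAA);
}

Example #3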
void block_database::store(const block_type& block)
{
    const uint32_t height = index_.count();
    const auto number_txs = block.transactions.size();
    const uint32_t number_txs32 = static_cast<uint32_t>(number_txs);

    // Write block data.
    const auto write = [&](uint8_t* data)
    {
        satoshi_save(block.header, data);
        auto serial = make_serializer(data + 80);
        serial.write_4_bytes(height);
        serial.write_4_bytes(number_txs32);
        for (const auto& tx: block.transactions)
        {
            const auto tx_hash = hash_transaction(tx);
            serial.write_hash(tx_hash);
        }
    };

    const auto key = hash_block_header(block.header);
    const auto value_size = 80 + 4 + 4 + number_txs * hash_size;
    const auto position = map_.store(key, write, value_size);

    // Write height -> position mapping.
    write_position(position);
}
Example #4
void wrap_fetch_transaction_args(data_chunk& data, const hash_digest& tx_hash)
{
    data.resize(hash_digest_size);
    auto serial = make_serializer(data.begin());
    serial.write_hash(tx_hash);
    BITCOIN_ASSERT(serial.iterator() == data.end());
}
Example #5
data_chunk create_spent_key(const Point& point)
{
    data_chunk spent_key(hash_digest_size + 4);
    auto serial = make_serializer(spent_key.begin());
    serial.write_hash(point.hash);
    serial.write_4_bytes(point.index);
    return spent_key;
}
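Example #6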
uint32_t addr_key_checksum(const output_point& outpoint)
{
    data_chunk chksum_data(hash_digest_size + 4);
    auto serial = make_serializer(chksum_data.begin());
    serial.write_hash(outpoint.hash);
    serial.write_4_bytes(outpoint.index);
    BITCOIN_ASSERT(
        std::distance(chksum_data.begin(), serial.iterator()) ==
        hash_digest_size + 4);
    return generate_sha256_checksum(chksum_data);
}
Example #7
uint64_t addr_key_checksum(const output_point& outpoint)
{
    data_chunk checksum_data(hash_digest_size + 4);
    auto serial = make_serializer(checksum_data.begin());
    serial.write_hash(outpoint.hash);
    serial.write_4_bytes(outpoint.index);
    BITCOIN_ASSERT(serial.iterator() == checksum_data.end());
    hash_digest hash = generate_sha256_hash(checksum_data);
    data_chunk raw_checksum(hash.begin(), hash.begin() + 8);
    return cast_chunk<uint64_t>(raw_checksum);
}
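Example #8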
// write the master hash table
void write_master_hash_table(FILE *f)
{
    unsigned int i = 1;
    unsigned int off, blockoff;
    unsigned int filesize = get_file_size(f);

    off = SECOND_HASH_START;
    blockoff = BLOCK_HASH_START;
    write_hash(f, off, blockoff, BLOCK_SIZE, 1);

    while(1)
    {
        off = SECOND_HASH_START + i * 24;
        blockoff = FILE_START_OFFSET + (0xAA * BLOCK_SIZE * i) + (BLOCK_SIZE * (i-1));
        if (blockoff > (filesize-1)) break;

        write_hash(f, off, blockoff, BLOCK_SIZE, 0);
        i++;
    }
}
Example #9
hash_digest build_merkle_tree(hash_list& merkle)
{
    // Stop if hash list is empty.
    if (merkle.empty())
        return null_hash;
    else if (merkle.size() == 1)
        return merkle[0];

    // While there is more than 1 hash in the list, keep looping...
    while (merkle.size() > 1)
    {
        // If number of hashes is odd, duplicate last hash in the list.
        if (merkle.size() % 2 != 0)
            merkle.push_back(merkle.back());
        // List size is now even.
        BITCOIN_ASSERT(merkle.size() % 2 == 0);

        // New hash list.
        hash_list new_merkle;
        // Loop through hashes 2 at a time.
        for (auto it = merkle.begin(); it != merkle.end(); it += 2)
        {
            // Join both current hashes together (concatenate).
            data_chunk concat_data(hash_size * 2);
            auto concat = make_serializer(concat_data.begin());
            concat.write_hash(*it);
            concat.write_hash(*(it + 1));
            BITCOIN_ASSERT(concat.iterator() == concat_data.end());
            // Hash both of the hashes.
            hash_digest new_root = bitcoin_hash(concat_data);
            // Add this to the new list.
            new_merkle.push_back(new_root);
        }
        // This is the new list.
        merkle = new_merkle;
    }
    // Finally we end up with a single item.
    return merkle[0];
}
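A minimal usage sketch for build_merkle_tree, assuming libbitcoin's hash_list, data_chunk and bitcoin_hash as used in the example itself; the three leaves are arbitrary and chosen only to exercise the odd-count duplication branch:

// Sketch: three leaves force the duplicate-last-hash path once.
hash_digest example_merkle_root()
{
    hash_list leaves;
    leaves.push_back(bitcoin_hash(data_chunk{0x01}));
    leaves.push_back(bitcoin_hash(data_chunk{0x02}));
    leaves.push_back(bitcoin_hash(data_chunk{0x03}));
    // The first pass pairs (1,2) and (3,3); the second pass combines
    // the two pair hashes into the root. The list is modified in place.
    return build_merkle_tree(leaves);
}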
Example #10
void hashtable_database_writer::store(const hash_digest& key_hash,
    size_t value_size, write_value_function write)
{
    // Calculate the end of the last record.
    const uint64_t header_size = 24 + buckets_ * 8;
    const uint64_t records_end_offset = header_size + total_records_size_;
    // [ tx hash ]              32
    // [ varuint value size ]
    // [ ... value data ... ]
    // [ next tx in bucket ]    8
    const size_t record_size =
        32 + variable_uint_size(value_size) + value_size + 8;
    // If a record crosses a page boundary then we align it with
    // the beginning of the next page.
    const size_t record_begin =
        align_if_crossing_page(page_size_, records_end_offset, record_size);
    BITCOIN_ASSERT(file_.size() >= record_begin + record_size);
    // We will insert new transactions at the beginning of the bucket's list.
    // I assume that more recent transactions in the blockchain are used
    // more often than older ones.
    // We lookup the existing value in the bucket first.
    const uint64_t bucket_index = remainder(key_hash.data(), buckets_);
    BITCOIN_ASSERT(bucket_index < buckets_);
    const uint64_t previous_bucket_value = read_bucket_value(bucket_index);
    // Now begin writing the record itself.
    uint8_t* entry = file_.data() + record_begin;
    auto serial = make_serializer(entry);
    serial.write_hash(key_hash);
    serial.write_variable_uint(value_size);
    // Call the supplied callback to serialize the data.
    write(serial.iterator());
    serial.set_iterator(serial.iterator() + value_size);
    serial.write_8_bytes(previous_bucket_value);
    BITCOIN_ASSERT(serial.iterator() == entry + record_size);
    // Change file size value at file start.
    // This must be done first so any subsequent writes don't
    // overwrite this record in case of a crash or interruption.
    BITCOIN_ASSERT(record_begin >= header_size);
    const uint64_t alignment_padding =
        record_begin - header_size - total_records_size_;
    BITCOIN_ASSERT(alignment_padding <= page_size_);
    total_records_size_ += record_size + alignment_padding;
    // Now add record to bucket.
    const uint64_t record_begin_offset = record_begin - header_size;
    link_record(bucket_index, record_begin_offset);
}
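The record framing spelled out in the comments above ([32-byte key][varuint size][value][8-byte next]) can be walked back with plain pointer arithmetic. A sketch of the read side, assuming Bitcoin-style variable-length integers (which variable_uint_size implies) and little-endian fields:

#include <cstddef>
#include <cstdint>

// Sketch: views into one record of the on-disk format described above.
struct record_view
{
    const uint8_t* key;    // 32-byte transaction hash
    const uint8_t* value;  // value_size bytes of payload
    uint64_t value_size;
    uint64_t next;         // offset of the next record in this bucket
};

static uint64_t read_le(const uint8_t* data, size_t width)
{
    uint64_t out = 0;
    for (size_t i = 0; i < width; ++i)
        out |= static_cast<uint64_t>(data[i]) << (8 * i);
    return out;
}

record_view parse_record(const uint8_t* entry)
{
    record_view view;
    view.key = entry;
    const uint8_t* it = entry + 32;
    // Bitcoin varint: one byte below 0xfd, otherwise a marker byte
    // followed by 2, 4 or 8 little-endian bytes.
    const uint8_t marker = *it++;
    if (marker < 0xfd)
        view.value_size = marker;
    else
    {
        const size_t width = (marker == 0xfd) ? 2 : (marker == 0xfe) ? 4 : 8;
        view.value_size = read_le(it, width);
        it += width;
    }
    view.value = it;
    view.next = read_le(it + view.value_size, 8);
    return view;
}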
Example #11
void add_stealth_info(const data_chunk& stealth_data,
    const payment_address& address, const hash_digest& tx_hash,
    stealth_database& db)
{
    const stealth_bitfield bitfield = calculate_bitfield(stealth_data);
    const data_chunk ephemkey = read_ephemkey(stealth_data);
    auto write_func = [&](uint8_t *it)
    {
        auto serial = make_serializer(it);
        serial.write_uint_auto(bitfield);
        serial.write_data(ephemkey);
        serial.write_byte(address.version());
        serial.write_short_hash(address.hash());
        serial.write_hash(tx_hash);
        BITCOIN_ASSERT(serial.iterator() == it + bitfield_size + 33 + 21 + 32);
    };
    db.store(write_func);
}
Example #12
bool leveldb_common::save_block(
    uint32_t height, const block_type& serial_block)
{
    leveldb_transaction_batch batch;
    // Write block header + tx hashes
    data_chunk raw_block_data(
        80 + 4 + serial_block.transactions.size() * hash_digest_size);
    // Downcast to base header type so serializer selects that.
    auto header_end = satoshi_save(
        serial_block.header, raw_block_data.begin());
    BITCOIN_ASSERT(std::distance(raw_block_data.begin(), header_end) == 80);
    auto serial_hashes = make_serializer(header_end);
    // Write the number of transactions...
    serial_hashes.write_4_bytes(serial_block.transactions.size());
    // ... and now the tx hashes themselves.
    for (uint32_t tx_index = 0;
        tx_index < serial_block.transactions.size(); ++tx_index)
    {
        const transaction_type& block_tx =
            serial_block.transactions[tx_index];
        const hash_digest& tx_hash = hash_transaction(block_tx);
        if (!save_transaction(batch, height, tx_index, tx_hash, block_tx))
        {
            log_fatal(LOG_BLOCKCHAIN) << "Could not save transaction";
            return false;
        }
        serial_hashes.write_hash(tx_hash);
    }
    BITCOIN_ASSERT(serial_hashes.iterator() ==
        raw_block_data.begin() + 80 + 4 +
            serial_block.transactions.size() * hash_digest_size);
    data_chunk raw_height = uncast_type(height);
    hash_digest block_hash = hash_block_header(serial_block.header);
    // Write block header
    batch.block.Put(slice(raw_height), slice(raw_block_data));
    batch.block_hash.Put(slice_block_hash(block_hash), slice(raw_height));
    // Execute batches.
    db_.write(batch);
    // Sync stealth database.
    db_stealth_->sync(height);
    return true;
}
Example #13
bool add_credit(leveldb::WriteBatch& batch,
    const payment_address& address, uint64_t output_value,
    const output_point& outpoint, uint32_t block_height)
{
    data_chunk addr_key = create_address_key(address, outpoint);
    // outpoint, value, block_height
    data_chunk row_info(36 + 8 + 4);
    auto serial = make_serializer(row_info.begin());
    // outpoint
    serial.write_hash(outpoint.hash);
    serial.write_4_bytes(outpoint.index);
    // value
    serial.write_8_bytes(output_value);
    // block_height
    serial.write_4_bytes(block_height);
    BITCOIN_ASSERT(
        std::distance(row_info.begin(), serial.iterator()) == 36 + 8 + 4);
    batch.Put(slice(addr_key), slice(row_info));
    return true;
}
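For reference, the 36 + 8 + 4 = 48-byte credit row written above decodes back like this (a sketch; it assumes the host is little-endian, matching what make_serializer writes):

#include <cstdint>
#include <cstring>

// Sketch: inverse of the row layout serialized in add_credit.
// row must point at the 48-byte value stored under the address key.
void parse_credit_row(const uint8_t* row, uint8_t out_hash[32],
    uint32_t& out_index, uint64_t& out_value, uint32_t& out_height)
{
    std::memcpy(out_hash, row, 32);        // outpoint.hash
    std::memcpy(&out_index, row + 32, 4);  // outpoint.index
    std::memcpy(&out_value, row + 36, 8);  // output value
    std::memcpy(&out_height, row + 44, 4); // block height
}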
Example #14
bool add_debit(leveldb::WriteBatch& batch,
    const transaction_input_type& input, const input_point& inpoint,
    uint32_t block_height)
{
    payment_address address;
    // Not a Bitcoin address so skip this input.
    if (!extract(address, input.script))
        return true;
    data_chunk addr_key = create_address_key(address, input.previous_output);
    // inpoint
    data_chunk row_info(36 + 4);
    auto serial = make_serializer(row_info.begin());
    // inpoint
    serial.write_hash(inpoint.hash);
    serial.write_4_bytes(inpoint.index);
    // block_height
    serial.write_4_bytes(block_height);
    BITCOIN_ASSERT(
        std::distance(row_info.begin(), serial.iterator()) == 36 + 4);
    batch.Put(slice(addr_key), slice(row_info));
    return true;
}
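Example #15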
void history_scan_database::add(const address_bitset& key,
    const uint8_t marker, const point_type& point,
    uint32_t block_height, uint64_t value)
{
    BITCOIN_ASSERT(key.size() >= settings_.sharded_bitsize);
    // Both add() and sync() must have identical lookup of shards.
    hsdb_shard& shard = lookup(key);
    address_bitset sub_key = drop_prefix(key);
    BITCOIN_ASSERT(sub_key.size() == settings_.scan_bitsize());
#ifdef HSDB_DEBUG
    log_debug(LOG_HSDB) << "Sub key = " << sub_key;
#endif
    data_chunk row_data(settings_.row_value_size);
    auto serial = make_serializer(row_data.begin());
    serial.write_byte(marker);
    serial.write_hash(point.hash);
    serial.write_4_bytes(point.index);
    serial.write_4_bytes(block_height);
    serial.write_8_bytes(value);
    BITCOIN_ASSERT(serial.iterator() ==
        row_data.begin() + settings_.row_value_size);
    shard.add(sub_key, row_data);
}
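The writes above account for 1 + 32 + 4 + 4 + 8 = 49 bytes (marker, point hash, point index, block height, value), so the closing assertion only holds when settings_.row_value_size is 49 in this configuration.

Example #16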
void block_database::store(const block_type& block)
{
    const size_t height = index_.size();
    // Write block data.
    const hash_digest key = hash_block_header(block.header);
    const size_t number_txs = block.transactions.size();
    const size_t value_size = 80 + 4 + 4 + number_txs * hash_size;
    auto write = [&](uint8_t* data)
    {
        satoshi_save(block.header, data);
        auto serial = make_serializer(data + 80);
        serial.write_4_bytes(height);
        serial.write_4_bytes(number_txs);
        for (const transaction_type& tx: block.transactions)
        {
            const hash_digest tx_hash = hash_transaction(tx);
            serial.write_hash(tx_hash);
        }
    };
    const position_type position = map_.store(key, value_size, write);
    // Write height -> position mapping.
    write_position(position);
}
Example #17
File: main.c  Project: kharazi/google
int main()
{
    FILE *corpus;
    FILE **allOutput;
    char a[100];
    char *token;
    char filename[100],addr[100];
    unsigned int i = 0,start=0;
    int need_to_cache = 0, counter = 0,template;
    HashTableStruct hashTable;
    PostingStruct data;
    PostingStruct docId = {0};              // next starts out NULL
    PostingStruct *docIdCursor = &docId;
    PostingStruct *docIdCursorTemp = NULL;
    unsigned int size;
    char *buffer, *lastWord;
    buffer = (char*) calloc(BUFFER_SIZE, sizeof(char));
    // calloc so the buckets start zeroed
    hashTable.rows = (HashCellStruct *) calloc(HASH_SIZE, sizeof(HashCellStruct));
    corpus = fopen("file.txt", "r");
    if(corpus == NULL){
        printf("could not open file.txt\n");
        return 1;
    }
    size = fread(buffer, 1, BUFFER_SIZE, corpus);
    printf("Start indexing ...\n");
    token = (char*) malloc(1);
    // loop over chunks; a final partial chunk is processed too
    while(size > 0){
        for(start=0,i=0;i<size;i++) {
            // delimiter reached: flush the token accumulated since 'start'
            if(buffer[i]==' ' || buffer[i]=='\'' || buffer[i]==',' || buffer[i]=='\"' || buffer[i]=='\0' || buffer[i]=='\t' || buffer[i]=='\n'){
                if(i-start==0){
                    start++;
                    continue;
                }
                free(token);
                if(need_to_cache){
                    need_to_cache = 0;
                    // join the word cached from the previous chunk with its
                    // continuation at the start of this one
                    token = (char *) calloc (strlen(lastWord)+i-start+1, sizeof(char));
                    memcpy(token, lastWord, strlen(lastWord));
                    memcpy(token+strlen(lastWord), buffer+start, i-start);
                    data.position = start - strlen(lastWord);
                    free(lastWord);
                }
                else{
                    token = (char *) malloc (i-start+1);
                    token[i-start] = '\0';
                    memcpy(token,buffer+start,i-start);
                    data.position = start;
                }
                data.next = NULL;
                // printf("%d: %s\n",start,token);
                if(strcmp(token,"</XML>")==0){
//                    docIdCursor->position = i;
//                    docIdCursor->next = (PostingStruct *) malloc(sizeof(PostingStruct));
//                    docIdCursorTemp = docIdCursor;
//                    docIdCursor = docIdCursor->next;
                    start = i+1;
                    continue;
                }
                if(strcmp(token,"<XML>")==0){
                    docIdCursor->position = i;
                    docIdCursor->next = (PostingStruct *) malloc(sizeof(PostingStruct));
                    docIdCursorTemp = docIdCursor;
                    docIdCursor = docIdCursor->next;
                    start = i+1;
                    continue;
                }
                stem(token);
                if(strlen(token)>0){
                    insert(&token, i-start, &data, &hashTable);
                }
                start = i+1;
            }
            else if(i==size-1)
            {
                // chunk ends mid-word: cache the fragment and merge it with
                // its continuation from the next chunk
                lastWord = (char*) malloc(size - start + 1);
                lastWord[size-start] = '\0';
                memcpy(lastWord,buffer+start,size-start);
                need_to_cache = 1;
            }
        }
        // drop the over-allocated tail node and terminate the list
        if(docIdCursor != &docId)
            free(docIdCursor);
        if(docIdCursorTemp != NULL)
            docIdCursorTemp->next = NULL;
        docIdCursor = &docId;               // rebuild the list next chunk
        // custom_free_hash_table(hashTable);
        sprintf(filename, "%d", counter);
        strcpy(addr,"files/");
        strcat(addr,filename);
        write_hash(&hashTable, addr,"w", &docId);
        hash_report(&hashTable);
        custom_free_hash_table(&hashTable);
        free(hashTable.rows);
        hashTable.rows = (HashCellStruct *) calloc(HASH_SIZE, sizeof(HashCellStruct));
        size = fread(buffer, 1, BUFFER_SIZE, corpus);
        counter++;
    }
    fclose(corpus);
    printf("Indexing done!\n");
    free(buffer);
    custom_free_hash_table(&hashTable);
    allOutput = (FILE**) malloc (counter*sizeof(FILE*));
    // free the document-id list (every node, including the last)
    docIdCursor = docId.next;
    while(docIdCursor != NULL)
    {
        docIdCursorTemp = docIdCursor->next;
        free(docIdCursor);
        docIdCursor = docIdCursorTemp;
    }
    printf("Start merging ...\n");
    if(counter>1){
        for(template=0;template<counter;template++){