void DatagramClTest::setUp() { // create a new event switch _esl = new ibrtest::EventSwitchLoop(); // enable blob path ibrcommon::File blob_path("/tmp/blobs"); // check if the BLOB path exists if (!blob_path.exists()) { // try to create the BLOB path ibrcommon::File::createDirectory(blob_path); } // enable the blob provider ibrcommon::BLOB::changeProvider(new ibrcommon::FileBLOBProvider(blob_path), true); // add standard memory base storage _storage = new dtn::storage::MemoryBundleStorage(); // make storage globally available dtn::core::BundleCore::getInstance().setStorage(_storage); dtn::core::BundleCore::getInstance().setSeeker(_storage); // create fake datagram service _fake_service = new FakeDatagramService(); _fake_cl = new DatagramConvergenceLayer( _fake_service ); // add convergence layer to bundle core dtn::core::BundleCore::getInstance().getConnectionManager().add(_fake_cl); // initialize BundleCore dtn::core::BundleCore::getInstance().initialize(); // start-up event switch _esl->start(); _fake_cl->initialize(); try { dtn::daemon::Component &c = dynamic_cast<dtn::daemon::Component&>(*_storage); c.initialize(); } catch (const std::bad_cast&) { } // startup BundleCore dtn::core::BundleCore::getInstance().startup(); _fake_cl->startup(); try { dtn::daemon::Component &c = dynamic_cast<dtn::daemon::Component&>(*_storage); c.startup(); } catch (const std::bad_cast&) { } }
static int builtin_diff_blobs(struct rev_info *revs, int argc, const char **argv, struct object_array_entry **blob) { unsigned mode = canon_mode(S_IFREG | 0644); if (argc > 1) usage(builtin_diff_usage); if (blob[0]->mode == S_IFINVALID) blob[0]->mode = mode; if (blob[1]->mode == S_IFINVALID) blob[1]->mode = mode; stuff_change(&revs->diffopt, blob[0]->mode, blob[1]->mode, &blob[0]->item->oid, &blob[1]->item->oid, 1, 1, blob_path(blob[0]), blob_path(blob[1])); diffcore_std(&revs->diffopt); diff_flush(&revs->diffopt); return 0; }
/*
 * Digest a local blob stream and store the digested blob.
 *
 * Reads 'fd' to end-of-stream while computing an incremental SHA1.
 * The ASCII hex form of the digest is written to 'hex_digest'
 * (caller must supply room for 40 chars + NUL).  When 'do_put' is
 * true the bytes are also copied into a temp file under
 * $sha_fs_root/tmp and, on success, renamed into the blob tree.
 *
 * Returns 0 on success, -1 when reading 'fd' fails; unrecoverable
 * local errors panic instead of returning.
 */
static int
sha_fs_digest(struct request *r, int fd, char *hex_digest, int do_put)
{
	char unsigned buf[4096], digest[20], *d, *d_end;
	char *h;
	blk_SHA_CTX ctx;
	int nread;
	int tmp_fd = -1;
	char tmp_path[MAX_FILE_PATH_LEN];
	int status = 0;

	tmp_path[0] = 0;

	if (do_put) {
		/* drift disambiguates temp names made in the same second/pid */
		static int drift = 0;

		if (drift++ >= 999)
			drift = 0;

		/*
		 * Open a temporary file in $sha_fs_root/tmp to accumulate the
		 * blob read from the stream.  The file looks like
		 *
		 *	digest-time-pid-drift
		 */
		snprintf(tmp_path, sizeof tmp_path, "%s/digest-%d-%u-%d",
						boot_data.tmp_dir_path,
						/*
						 *  Warning:
						 *	Casting time() to int is
						 *	incorrect!!
						 */
						(int)time((time_t *)0),
						getpid(),
						drift);
		/*
		 * Open the file ... need O_LARGEFILE support!!
		 * Need to catch EINTR!!!!
		 */
		tmp_fd = io_open(tmp_path, O_CREAT|O_EXCL|O_WRONLY|O_APPEND, S_IRUSR);
		if (tmp_fd < 0)
			_panic3(r, "digest: open(tmp) failed", tmp_path,
							strerror(errno));
	}

	/* digest the stream, teeing each chunk into the temp file on put */
	blk_SHA1_Init(&ctx);
	while ((nread = io_read(fd, buf, sizeof buf)) > 0) {
		blk_SHA1_Update(&ctx, buf, nread);
		if (do_put && io_write_buf(tmp_fd, buf, nread) != 0)
			_panic2(r, "digest: write_buf(tmp) failed",
						strerror(errno));
	}
	if (nread < 0) {
		_error(r, "digest: _read() failed");
		goto croak;
	}
	blk_SHA1_Final(digest, &ctx);

	if (do_put) {
		status = io_close(tmp_fd);
		tmp_fd = -1;		/* closed: cleanup must not close again */
		if (status)
			_panic2(r,"digest: close(tmp) failed",strerror(errno));
	}

	/*
	 * Convert the binary sha digest to text.
	 */
	h = hex_digest;
	d = digest;
	d_end = d + 20;
	while (d < d_end) {
		*h++ = nib2hex[(*d & 0xf0) >> 4];
		*h++ = nib2hex[*d & 0xf];
		d++;
	}
	*h = 0;

	/*
	 * Move the blob from the temporary file to the blob file.
	 */
	if (do_put) {
		/*
		 * NOTE(review): blob_path() presumably derives the target
		 * path into r->open_data->blob_path - confirm against its
		 * definition; the very next line reads that field.
		 */
		blob_path(r, hex_digest);
		arbor_rename(tmp_path,
			((struct sha_fs_request *)r->open_data)->blob_path);
		tmp_path[0] = 0;	/* renamed away: nothing to unlink below */
	}
	goto cleanup;
croak:
	status = -1;
cleanup:
	/* close/unlink the temp file only if it is still live */
	if (tmp_fd > -1 && io_close(tmp_fd))
		_panic2(r, "digest: close(tmp) failed", strerror(errno));
	if (tmp_path[0] && io_unlink(tmp_path))
		_panic3(r, "digest: unlink(tmp) failed", tmp_path,
						strerror(errno));
	return status;
}
static int sha_fs_put(struct request *r) { blk_SHA_CTX ctx; char tmp_path[MAX_FILE_PATH_LEN]; unsigned char chunk[CHUNK_SIZE], *cp, *cp_end; int fd = -1; int status = 0; char buf[MSG_SIZE]; /* * Open a temporary file in $sha_fs_root/tmp to accumulate the * blob read from the client. The file looks like * * [put|give]-time-pid-digest */ snprintf(tmp_path, sizeof tmp_path, "%s/%s-%d-%u-%s", boot_data.tmp_dir_path, r->verb, /* * Warning: * Casting time() to int is * incorrect!! */ (int)time((time_t *)0), getpid(), r->digest); /* * Open the file ... need O_LARGEFILE support!! * Need to catch EINTR!!!! */ fd = io_open(tmp_path, O_CREAT|O_EXCL|O_WRONLY|O_APPEND, S_IRUSR); if (fd < 0) { snprintf(buf, sizeof buf, "open(%s) failed: %s", tmp_path, strerror(errno)); _panic(r, buf); } /* * Initialize digest of blob being scanned from the client. */ blk_SHA1_Init(&ctx); /* * An empty blob is always put. * Note: the caller has already ensured that no more data has * been written by the client, so no need to check r->scan_size. */ if (strcmp(r->digest, empty_ascii) == 0) goto digested; /* * Copy what we have already read into the first chunk buffer. * * If we've read ahead more than we can chew, * then croak. This should never happen. */ if (r->scan_size > 0) { // Note: regress, sanity test ... remove later. if ((u8)r->scan_size != r->blob_size) _panic(r, "r->scan_size != r->blob_size"); if (r->scan_size > (int)(sizeof chunk - 1)) { snprintf(buf, sizeof buf, "max=%lu", (long unsigned)(sizeof chunk - 1)); _panic2(r, "scanned chunk too big", buf); } /* * See if the entire blob fits in the first read. */ if (eat_chunk(r, &ctx, fd, r->scan_buf, r->scan_size)) goto digested; } cp = chunk; cp_end = &chunk[sizeof chunk]; /* * Read more chunks until we see the blob. */ again: while (cp < cp_end) { int nread = blob_read(r, cp, cp_end - cp); /* * Read error from client, * so zap the partial, invalid blob. 
*/ if (nread < 0) { _error(r, "blob_read() failed"); goto croak; } if (nread == 0) { _error(r, "blob_read() returns 0 before digest seen"); goto croak; } switch (eat_chunk(r, &ctx, fd, cp, nread)) { case -1: _panic(r, "eat_chunk(local) failed"); case 1: goto digested; } cp += nread; } cp = chunk; goto again; digested: if (fd >= 0) _close(r, &fd); /* * Move the temp blob file to the final blob path. */ blob_path(r, r->digest); arbor_rename(tmp_path, ((struct sha_fs_request *)r->open_data)->blob_path); goto cleanup; croak: status = -1; cleanup: if (fd > -1) _panic(r, "_close() failed"); if (tmp_path[0] && _unlink(r, tmp_path, (int *)0)) _panic(r, "_unlink() failed"); return status; }
static int sha_fs_eat(struct request *r) { struct sha_fs_request *sp = (struct sha_fs_request *)r->open_data; int status = 0; blk_SHA_CTX ctx; unsigned char digest[20]; int fd; unsigned char chunk[CHUNK_SIZE]; int nread; blob_path(r, r->digest); /* * Open the file to the blob. */ switch (_open(r, sp->blob_path, &fd)) { case 0: break; /* * Blob not found. */ case ENOENT: return 1; default: _panic(r, "_open(blob) failed"); } blk_SHA1_Init(&ctx); /* * Read a chunk from the file and chew. */ while ((nread = _read(r, fd, chunk, sizeof chunk)) > 0) /* * Update the incremental digest. */ blk_SHA1_Update(&ctx, chunk, nread); if (nread < 0) _panic(r, "_read(blob) failed"); /* * Finalize the digest. */ blk_SHA1_Final(digest, &ctx); /* * If the calculated digest does NOT match the stored digest, * then zap the blob from storage and get panicy. * A corrupt blob is a bad, bad thang. * * Note: unfortunately we've already deceived the client * by sending "ok". Probably need to improve for * the special case when the entire blob is read * in first chunk. */ if (memcmp(sp->digest, digest, 20)) _panic2(r, "stored blob doesn't match digest", r->digest); if (_close(r, &fd)) _panic(r, "_close(blob) failed"); return status; }
/* * Copy a local blob to a local stream. * * Return 0 if stream matches signature, -1 otherwise. * Note: this needs to be folded into sha_fs_get(). */ static int sha_fs_copy(struct request *r, int out_fd) { struct sha_fs_request *sp = (struct sha_fs_request *)r->open_data; int status = 0; blk_SHA_CTX ctx; unsigned char digest[20]; int fd; unsigned char chunk[CHUNK_SIZE]; int nread; static char n[] = "sha_fs_write"; blob_path(r, r->digest); /* * Open the file to the blob. */ switch (_open(r, sp->blob_path, &fd)) { case 0: break; case ENOENT: _warn3(r, n, "open(blob): not found", r->digest); return 1; default: _panic2(r, n, "_open(blob) failed"); } blk_SHA1_Init(&ctx); /* * Read a chunk from the file, write chunk to local stream, * update incremental digest. */ while ((nread = _read(r, fd, chunk, sizeof chunk)) > 0) { if (io_write_buf(out_fd, chunk, nread)) { _error2(r, n, "write_buf() failed"); goto croak; } /* * Update the incremental digest. */ blk_SHA1_Update(&ctx, chunk, nread); } if (nread < 0) _panic2(r, n, "_read(blob) failed"); /* * Finalize the digest. */ blk_SHA1_Final(digest, &ctx); /* * If the calculated digest does NOT match the stored digest, * then zap the blob from storage and get panicy. * A corrupt blob is a bad, bad thang. */ if (memcmp(sp->digest, digest, 20)) _panic3(r, n, "stored blob doesn't match digest", r->digest); goto cleanup; croak: status = -1; cleanup: if (_close(r, &fd)) _panic2(r, n, "_close(blob) failed"); return status; }
static int sha_fs_get(struct request *r) { struct sha_fs_request *sp = (struct sha_fs_request *)r->open_data; int status = 0; blk_SHA_CTX ctx; unsigned char digest[20]; int fd; unsigned char chunk[CHUNK_SIZE]; int nread; blob_path(r, r->digest); /* * Open the file to the blob. */ switch (_open(r, sp->blob_path, &fd)) { case 0: break; case ENOENT: return 1; default: _panic(r, "_open(blob) failed"); } /* * Tell the client we have the blob. */ if (write_ok(r)) { _error(r, "write_ok() failed"); goto croak; } blk_SHA1_Init(&ctx); /* * Read a chunk from the file, write chunk to client, * update incremental digest. * * In principle, we ought to first scan the blob file * before sending "ok" to the requestor. */ while ((nread = _read(r, fd, chunk, sizeof chunk)) > 0) { if (blob_write(r, chunk, nread)) { _error(r, "blob_write(blob chunk) failed"); goto croak; } /* * Update the incremental digest. */ blk_SHA1_Update(&ctx, chunk, nread); } if (nread < 0) _panic(r, "_read(blob) failed"); /* * Finalize the digest. */ blk_SHA1_Final(digest, &ctx); /* * If the calculated digest does NOT match the stored digest, * then zap the blob from storage and get panicy. * A corrupt blob is a bad, bad thang. * * Note: unfortunately we've already deceived the client * by sending "ok". Probably need to improve for * the special case when the entire blob is read * in first chunk. */ if (memcmp(sp->digest, digest, 20)) { _error2(r, "PANIC: stored blob doesn't match digest", r->digest); if (zap_blob(r)) _panic(r, "zap_blob() failed"); goto croak; } goto cleanup; croak: status = -1; cleanup: if (_close(r, &fd)) _panic(r, "_close(blob) failed"); return status; }
/* * main application method */ int main( int argc, char** argv ) { // catch process signals ibrcommon::SignalHandler sighandler(sighandler_func); sighandler.handle(SIGINT); sighandler.handle(SIGTERM); #ifndef __WIN32__ sighandler.handle(SIGUSR1); #endif // configration object config_t conf; // read the configuration read_configuration(argc, argv, conf); init_logger(conf); // initialize sighandler after possible exit call sighandler.initialize(); // init working directory if (conf.workdir.length() > 0) { ibrcommon::File blob_path(conf.workdir); if (blob_path.exists()) { ibrcommon::BLOB::changeProvider(new ibrcommon::FileBLOBProvider(blob_path), true); } } // backoff for reconnect unsigned int backoff = 2; ibrcommon::File outbox_file(conf.outbox); // create new file lists fileset new_files, prev_files, deleted_files, files_to_send; filelist observed_files; hashlist sent_hashes; // observed root file io::ObservedFile root(ibrcommon::File("/")); #ifdef HAVE_LIBTFFS io::FatImageReader *imagereader = NULL; #endif if (outbox_file.exists() && !outbox_file.isDirectory()) { #ifdef HAVE_LIBTFFS conf.fat = true; imagereader = new io::FatImageReader(conf.outbox); const io::FATFile fat_root(*imagereader, conf.path); root = io::ObservedFile(fat_root); #else IBRCOMMON_LOGGER_TAG(TAG,error) << "ERROR: image-file provided, but this tool has been compiled without libtffs support!" << IBRCOMMON_LOGGER_ENDL; return -1; #endif } else { if (!outbox_file.exists()) { ibrcommon::File::createDirectory(outbox_file); } root = io::ObservedFile(outbox_file); } IBRCOMMON_LOGGER_TAG(TAG,info) << "-- dtnoutbox --" << IBRCOMMON_LOGGER_ENDL; // loop, if no stop if requested while (_running) { try { // Create a stream to the server using TCP. ibrcommon::vaddress addr("localhost", 4550); ibrcommon::socketstream conn(new ibrcommon::tcpsocket(addr)); // Initiate a client for synchronous receiving dtn::api::Client client(conf.name, conn, dtn::api::Client::MODE_SENDONLY); // Connect to the server. 
Actually, this function initiate the // stream protocol by starting the thread and sending the contact header. client.connect(); // reset backoff if connected backoff = 2; // check the connection while (_running) { // get all files fileset current_files; root.findFiles(current_files); // determine deleted files fileset deleted_files; std::set_difference(prev_files.begin(), prev_files.end(), current_files.begin(), current_files.end(), std::inserter(deleted_files, deleted_files.begin())); // remove deleted files from observation for (fileset::const_iterator iter = deleted_files.begin(); iter != deleted_files.end(); ++iter) { const io::ObservedFile &deletedFile = (*iter); // remove references in the sent_hashes for (hashlist::iterator hash_it = sent_hashes.begin(); hash_it != sent_hashes.end(); /* blank */) { if ((*hash_it).getPath() == deletedFile.getFile().getPath()) { sent_hashes.erase(hash_it++); } else { ++hash_it; } } // remove from observed files observed_files.remove(deletedFile); // output IBRCOMMON_LOGGER_TAG(TAG,info) << "file removed: " << deletedFile.getFile().getBasename() << IBRCOMMON_LOGGER_ENDL; } // determine new files fileset new_files; std::set_difference(current_files.begin(), current_files.end(), prev_files.begin(), prev_files.end(), std::inserter(new_files, new_files.begin())); // add new files to observation for (fileset::const_iterator iter = new_files.begin(); iter != new_files.end(); ++iter) { const io::ObservedFile &of = (*iter); int reg_ret = regexec(&conf.regex, of.getFile().getBasename().c_str(), 0, NULL, 0); if (!reg_ret && !conf.invert) continue; if (reg_ret && conf.invert) continue; // print error message, if regex error occurs if (reg_ret && reg_ret != REG_NOMATCH) { char msgbuf[100]; regerror(reg_ret,&conf.regex,msgbuf,sizeof(msgbuf)); IBRCOMMON_LOGGER_TAG(TAG,info) << "ERROR: regex match failed : " << std::string(msgbuf) << IBRCOMMON_LOGGER_ENDL; } // add new file to the observed set observed_files.push_back(of); // log output 
IBRCOMMON_LOGGER_TAG(TAG, info) << "file found: " << of.getFile().getBasename() << IBRCOMMON_LOGGER_ENDL; } // store current files for the next round prev_files.clear(); prev_files.insert(current_files.begin(), current_files.end()); IBRCOMMON_LOGGER_TAG(TAG, notice) << "file statistics: " << observed_files.size() << " observed, " << deleted_files.size() << " deleted, " << new_files.size() << " new" << IBRCOMMON_LOGGER_ENDL; // find files to send, create std::list files_to_send.clear(); IBRCOMMON_LOGGER_TAG(TAG, notice) << "updating observed files:" << IBRCOMMON_LOGGER_ENDL; for (filelist::iterator iter = observed_files.begin(); iter != observed_files.end(); ++iter) { io::ObservedFile &of = (*iter); // tick and update all files of.update(); if (of.getStableCounter() > conf.rounds) { if (sent_hashes.find(of.getHash()) == sent_hashes.end()) { sent_hashes.insert(of.getHash()); files_to_send.insert(*iter); } } IBRCOMMON_LOGGER_TAG(TAG, notice) << "\t" << of.getFile().getBasename() << ": " << of.getStableCounter() << IBRCOMMON_LOGGER_ENDL; } if (!files_to_send.empty()) { std::stringstream ss; for (fileset::const_iterator it = files_to_send.begin(); it != files_to_send.end(); ++it) { ss << (*it).getFile().getBasename() << " "; } IBRCOMMON_LOGGER_TAG("dtnoutbox",info) << "files sent: " << ss.str() << IBRCOMMON_LOGGER_ENDL; try { // create a blob ibrcommon::BLOB::Reference blob = ibrcommon::BLOB::create(); // write files into BLOB while it is locked { ibrcommon::BLOB::iostream stream = blob.iostream(); io::TarUtils::write(*stream, root, files_to_send); } // create a new bundle dtn::data::EID destination = EID(conf.destination); // create a new bundle dtn::data::Bundle b; // set destination b.destination = destination; // add payload block using the blob b.push_back(blob); // set destination address to non-singleton, if configured if (conf.bundle_group) b.set(dtn::data::PrimaryBlock::DESTINATION_IS_SINGLETON, false); // send the bundle client << b; client.flush(); } catch 
(const ibrcommon::IOException &e) { IBRCOMMON_LOGGER_TAG(TAG,error) << "send failed: " << e.what() << IBRCOMMON_LOGGER_ENDL; } } // wait defined seconds ibrcommon::MutexLock l(_wait_cond); IBRCOMMON_LOGGER_TAG(TAG, notice) << conf.interval <<" ms wait" << IBRCOMMON_LOGGER_ENDL; while (!_wait_abort && _running) { _wait_cond.wait(conf.interval); } _wait_abort = false; } // clean up regex regfree(&conf.regex); // close the client connection client.close(); // close the connection conn.close(); } catch (const ibrcommon::socket_exception&) { if (_running) { IBRCOMMON_LOGGER_TAG(TAG,error) << "Connection to bundle daemon failed. Retry in " << backoff << " seconds." << IBRCOMMON_LOGGER_ENDL; ibrcommon::Thread::sleep(backoff * 1000); // if backoff < 10 minutes if (backoff < 600) { // set a new backoff backoff = backoff * 2; } } } catch (const ibrcommon::IOException&) { if (_running) { IBRCOMMON_LOGGER_TAG(TAG,error) << "Connection to bundle daemon failed. Retry in " << backoff << " seconds." << IBRCOMMON_LOGGER_ENDL; ibrcommon::Thread::sleep(backoff * 1000); // if backoff < 10 minutes if (backoff < 600) { // set a new backoff backoff = backoff * 2; } } } catch (const std::exception&) { }; } // clear observed files observed_files.clear(); #ifdef HAVE_LIBTFFS // clean-up if (imagereader != NULL) delete imagereader; #endif return (EXIT_SUCCESS); }
/*
 * main application method
 *
 * dtninbox: connects to the local bundle daemon (localhost:4550),
 * receives bundles synchronously and unpacks each payload - expected
 * to be a tar archive - into the configured inbox directory.
 * Reconnects with exponential backoff (doubling up to just under
 * 10 minutes) when the daemon connection fails.  Runs until a signal
 * handler clears _running; always returns EXIT_SUCCESS.
 */
int main(int argc, char** argv)
{
	// catch process signals
	ibrcommon::SignalHandler sighandler(term);
	sighandler.handle(SIGINT);
	sighandler.handle(SIGTERM);

	// read the configuration
	read_configuration(argc, argv);

	//initialize sighandler after possible exit call
	sighandler.initialize();

	// use a file-based BLOB provider rooted in the configured
	// working directory, if one was given and it exists
	if (_conf_workdir.length() > 0)
	{
		ibrcommon::File blob_path(_conf_workdir);
		if (blob_path.exists())
		{
			ibrcommon::BLOB::changeProvider(new ibrcommon::FileBLOBProvider(blob_path), true);
		}
	}

	// backoff for reconnect (seconds)
	unsigned int backoff = 2;

	// loop, if no stop if requested
	while (_running)
	{
		try {
			// Create a stream to the server using TCP.
			ibrcommon::vaddress addr("localhost", 4550);
			ibrcommon::socketstream conn(new ibrcommon::tcpsocket(addr));

			// set the connection globally
			// (presumably so the signal handler can abort a blocked
			// receive - confirm against the handler's implementation)
			_conn = &conn;

			// Initiate a client for synchronous receiving
			dtn::api::Client client(_conf_name, conn);

			// Connect to the server. Actually, this function initiate the
			// stream protocol by starting the thread and sending the contact header.
			client.connect();

			// reset backoff if connected
			backoff = 2;

			// receive bundles until a stop is requested
			while (_running)
			{
				// receive the bundle (blocks until one arrives)
				dtn::data::Bundle b = client.getBundle();

				if(!_conf_quiet)
					std::cout << "received bundle: " << b.toString() << std::endl;

				// get the reference to the blob
				ibrcommon::BLOB::Reference ref = b.find<dtn::data::PayloadBlock>().getBLOB();

				// unpack the payload into the inbox while the BLOB is locked
				{
					ibrcommon::BLOB::iostream stream = ref.iostream();
					io::TarUtils::read(_conf_inbox, *stream);
				}
			}

			// close the client connection
			client.close();

			// close the connection
			conn.close();

			// set the global connection to NULL
			_conn = NULL;
		} catch (const ibrcommon::socket_exception&) {
			// set the global connection to NULL
			_conn = NULL;

			if (_running)
			{
				std::cout << "Connection to bundle daemon failed. Retry in " << backoff << " seconds." << std::endl;
				ibrcommon::Thread::sleep(backoff * 1000);

				// if backoff < 10 minutes
				if (backoff < 600)
				{
					// set a new backoff
					backoff = backoff * 2;
				}
			}
		} catch (const ibrcommon::IOException&) {
			// set the global connection to NULL
			_conn = NULL;

			if (_running)
			{
				std::cout << "Connection to bundle daemon failed. Retry in " << backoff << " seconds." << std::endl;
				ibrcommon::Thread::sleep(backoff * 1000);

				// if backoff < 10 minutes
				if (backoff < 600)
				{
					// set a new backoff
					backoff = backoff * 2;
				}
			}
		} catch (const std::exception&) {
			// set the global connection to NULL
			_conn = NULL;
		}
	}

	return (EXIT_SUCCESS);
}
void BundleStorageTest::setUp() { // create a new event switch esl = new ibrtest::EventSwitchLoop(); // enable blob path ibrcommon::File blob_path("/tmp/blobs"); // check if the BLOB path exists if (!blob_path.exists()) { // try to create the BLOB path ibrcommon::File::createDirectory(blob_path); } // enable the blob provider ibrcommon::BLOB::changeProvider(new ibrcommon::FileBLOBProvider(blob_path), true); switch (testCounter++) { case 0: { // add standard memory base storage _storage = new dtn::storage::MemoryBundleStorage(); break; } case 1: { // prepare path for the disk based storage ibrcommon::File path("/tmp/bundle-disk-test"); if (path.exists()) path.remove(true); ibrcommon::File::createDirectory(path); // add disk based storage _storage = new dtn::storage::SimpleBundleStorage(path); break; } #ifdef HAVE_SQLITE case 2: { // prepare path for the sqlite based storage ibrcommon::File path("/tmp/bundle-sqlite-test"); if (path.exists()) path.remove(true); ibrcommon::File::createDirectory(path); // prepare a sqlite database _storage = new dtn::storage::SQLiteBundleStorage(path, 0); break; } #endif } if (testCounter >= _storage_names.size()) testCounter = 0; // start-up event switch esl->start(); try { dtn::daemon::Component &c = dynamic_cast<dtn::daemon::Component&>(*_storage); c.initialize(); c.startup(); } catch (const bad_cast&) { } }