bool FileMapInfo::initialize() { assert(UseSharedSpaces, "UseSharedSpaces expected."); printf("%s[%d] [tid: %lu]: 开始初始化FileMapInfo...\n", __FILE__, __LINE__, pthread_self()); if (JvmtiExport::can_modify_any_class() || JvmtiExport::can_walk_any_space()) { fail_continue("Tool agent requires sharing to be disabled."); return false; } if (!open_for_read()) { return false; } init_from_file(_fd); if (!validate()) { return false; } SharedReadOnlySize = _header._space[0]._capacity; SharedReadWriteSize = _header._space[1]._capacity; SharedMiscDataSize = _header._space[2]._capacity; SharedMiscCodeSize = _header._space[3]._capacity; return true; }
// JVM/TI RedefineClasses() support: // Remap the shared readonly space to shared readwrite, private. bool FileMapInfo::remap_shared_readonly_as_readwrite() { struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[0]; if (!si->_read_only) { // the space is already readwrite so we are done return true; } size_t used = si->_used; size_t size = align_size_up(used, os::vm_allocation_granularity()); if (!open_for_read()) { return false; } char *base = os::remap_memory(_fd, _full_path, si->_file_offset, si->_base, size, false /* !read_only */, si->_allow_exec); close(); if (base == NULL) { fail_continue("Unable to remap shared readonly space (errno=%d).", errno); return false; } if (base != si->_base) { fail_continue("Unable to remap shared readonly space at required address."); return false; } si->_read_only = false; return true; }
// Open the shared archive file, read and validate the header // information (version, boot classpath, etc.). If initialization // fails, shared spaces are disabled and the file is closed. [See // fail_continue.] // // Validation of the archive is done in two steps: // // [1] validate_header() - done here. This checks the header, including _paths_misc_info. // [2] validate_classpath_entry_table - this is done later, because the table is in the RW // region of the archive, which is not mapped yet. bool FileMapInfo::initialize() { assert(UseSharedSpaces, "UseSharedSpaces expected."); if (!open_for_read()) { return false; } init_from_file(_fd); if (!validate_header()) { return false; } SharedReadOnlySize = _header->_space[0]._capacity; SharedReadWriteSize = _header->_space[1]._capacity; SharedMiscDataSize = _header->_space[2]._capacity; SharedMiscCodeSize = _header->_space[3]._capacity; return true; }
/*
 * Stream the contents of `from` into `to`: first the header, then all
 * gel records, then all contig records.  Each record list returned by a
 * read_* call is written out and destroyed; the nil sentinel that ends
 * a record stream is not destroyed (it signals end-of-data).
 */
void process(List *from, List *to) {
    List *rec;

    /* Open both sides and transfer the header. */
    open_for_read(from);
    open_for_write(to);

    rec = read_header(from);
    write_header(to, rec);
    destroy_list(rec);

    /* Copy every gel record until the nil sentinel. */
    rec = read_gel_data(from);
    while (!isNil(rec)) {
        write_gel_data(to, rec);
        destroy_list(rec);
        rec = read_gel_data(from);
    }

    /* Copy every contig record until the nil sentinel. */
    rec = read_contig_data(from);
    while (!isNil(rec)) {
        write_contig_data(to, rec);
        destroy_list(rec);
        rec = read_contig_data(from);
    }

    /* Close both sides. */
    close_files(from);
    close_files(to);
}
// Open the shared archive file, read and validate the header // information (version, boot classpath, etc.). If initialization // fails, shared spaces are disabled and the file is closed. [See // fail_continue.] // // Validation of the archive is done in two steps: // // [1] validate_header() - done here. This checks the header, including _paths_misc_info. // [2] validate_classpath_entry_table - this is done later, because the table is in the RW // region of the archive, which is not mapped yet. bool FileMapInfo::initialize() { assert(UseSharedSpaces, "UseSharedSpaces expected."); if (JvmtiExport::can_modify_any_class() || JvmtiExport::can_walk_any_space()) { fail_continue("Tool agent requires sharing to be disabled."); return false; } if (!open_for_read()) { return false; } init_from_file(_fd); if (!validate_header()) { return false; } SharedReadOnlySize = _header->_space[0]._capacity; SharedReadWriteSize = _header->_space[1]._capacity; SharedMiscDataSize = _header->_space[2]._capacity; SharedMiscCodeSize = _header->_space[3]._capacity; return true; }
rc_t log_core::fetch(lsn_t& ll, void* buf, lsn_t* nxt, const bool forward) { INC_TSTAT(log_fetches); lintel::atomic_thread_fence(lintel::memory_order_acquire); if (ll < _fetch_buf_end && ll >= _fetch_buf_begin) { // log record can be found in fetch buffer -- no I/O size_t i = ll.hi() - _fetch_buf_first; if (_fetch_buffers[i]) { logrec_t* rp = (logrec_t*) (_fetch_buffers[i] + ll.lo()); w_assert1(rp->valid_header(ll)); if (rp->type() == logrec_t::t_skip) { if (forward) { ll = lsn_t(ll.hi() + 1, 0); return fetch(ll, buf, nxt, forward); } else { // backward scan ll = *((lsn_t*) (_fetch_buffers[i] + ll.lo() - sizeof(lsn_t))); } rp = (logrec_t*) (_fetch_buffers[i] + ll.lo()); w_assert1(rp->valid_header(ll)); } if (nxt) { if (!forward && ll.lo() == 0) { auto p = _storage->get_partition(ll.hi() - 1); *nxt = p ? lsn_t(p->num(), p->get_size()) : lsn_t::null; } else { if (forward) { *nxt = ll; nxt->advance(rp->length()); } else { memcpy(nxt, (char*) rp - sizeof(lsn_t), sizeof(lsn_t)); } } } memcpy(buf, rp, rp->length()); INC_TSTAT(log_buffer_hit); return RCOK; } } if (forward && ll >= durable_lsn()) { w_assert1(ll == durable_lsn()); // reading the durable_lsn during recovery yields a skip log record, return RC(eEOF); } if (!forward && ll == lsn_t::null) { // for a backward scan, nxt pointer is set to null // when the first log record in the first partition is set return RC(eEOF); } auto p = _storage->get_partition(ll.hi()); if(!p) { return RC(eEOF); } W_DO(p->open_for_read()); logrec_t* rp; lsn_t prev_lsn = lsn_t::null; DBGOUT3(<< "fetch @ lsn: " << ll); W_COERCE(p->read(rp, ll, forward ? 
nullptr : &prev_lsn)); w_assert1(rp->valid_header(ll)); // handle skip log record if (rp->type() == logrec_t::t_skip) { p->release_read(); if (forward) { DBGTHRD(<<"seeked to skip" << ll ); DBGTHRD(<<"getting next partition."); ll = lsn_t(ll.hi() + 1, 0); p = _storage->get_partition(ll.hi()); if(!p) { return RC(eEOF); } // re-read DBGOUT3(<< "fetch @ lsn: " << ll); W_DO(p->open_for_read()); W_COERCE(p->read(rp, ll)); w_assert1(rp->valid_header(ll)); } else { // backward scan