/** Memory-map the file at @p filepath read-only.
 *
 *  Any previous mapping is released first.  On success, _mem/_len describe the
 *  new mapping; on failure the object is left unmapped.  No-op on WIN32
 *  builds.  Returns *this for chaining. */
ROMappedFile &ROMappedFile::map_file(const char *filepath) {
    unmap_file();
    assert(!is_mapped());
#ifndef WIN32
    const int fid = open(filepath, O_RDONLY);
    if (fid < 0) {
        return *this;
    }

    struct stat fid_info;
    memset(&fid_info, 0, sizeof(fid_info));
    // check for 32 bit limitations - off_t will be 32 bit on a 32 bit system, so we will be limited
    // to mapping 2gig there.  (NOTE(review): st_size is itself an off_t, so this comparison can
    // never be true when both have the same width -- the guard is effectively a no-op there.)
    if (fstat(fid, &fid_info) < 0 || fid_info.st_size > std::numeric_limits<off_t>::max()) {
        close(fid);
        return *this;
    }

    _mem = mmap(0, fid_info.st_size, PROT_READ, MAP_PRIVATE, fid, 0);
    // mmap keeps a reference to the handle, keeping the file open effectively
    close(fid);

    // BUG FIX: mmap() reports failure with MAP_FAILED ((void*)-1), not NULL.
    // Testing `_mem` for truth treated a failed map as success and left a
    // poison pointer in _mem.
    if (_mem == MAP_FAILED) {
        _mem = 0;
    } else {
        _len = fid_info.st_size;
    }
    assert(is_mapped() || _mem == 0);
#endif
    return *this;
}
/** Pre-unparsing updates */ bool SgAsmElfSegmentTable::reallocate() { bool reallocated = false; /* Resize based on word size from ELF File Header */ size_t opt_size, nentries; rose_addr_t need = calculate_sizes(NULL, NULL, &opt_size, &nentries); if (need < get_size()) { if (is_mapped()) { ROSE_ASSERT(get_mapped_size()==get_size()); set_mapped_size(need); } set_size(need); reallocated = true; } else if (need > get_size()) { get_file()->shift_extend(this, 0, need-get_size(), SgAsmGenericFile::ADDRSP_ALL, SgAsmGenericFile::ELASTIC_HOLE); reallocated = true; } /* Update data members in the ELF File Header. No need to return true for these changes. */ SgAsmElfFileHeader *fhdr = dynamic_cast<SgAsmElfFileHeader*>(get_header()); fhdr->set_phextrasz(opt_size); fhdr->set_e_phnum(nentries); return reallocated; }
/* Pre-unparsing updates */ bool SgAsmPESectionTable::reallocate() { bool reallocated = false; /* Resize based on section having largest ID */ SgAsmPEFileHeader *fhdr = dynamic_cast<SgAsmPEFileHeader*>(get_header()); ROSE_ASSERT(fhdr != NULL); SgAsmGenericSectionPtrList sections = fhdr->get_sections()->get_sections(); int max_id = 0; for (size_t i=0; i<sections.size(); i++) { max_id = std::max(max_id, sections[i]->get_id()); } size_t nsections = max_id; /*PE section IDs are 1-origin*/ size_t need = nsections * sizeof(SgAsmPESectionTableEntry::PESectionTableEntry_disk); if (need < get_size()) { if (is_mapped()) { ROSE_ASSERT(get_mapped_size()==get_size()); set_mapped_size(need); } set_size(need); reallocated = true; } else if (need > get_size()) { get_file()->shift_extend(this, 0, need-get_size(), SgAsmGenericFile::ADDRSP_ALL, SgAsmGenericFile::ELASTIC_HOLE); reallocated = true; } return reallocated; }
void PrintableWindow::hide() { if (is_mapped()) { HWND hwnd = Window::rep()->msWindow(); //printf("hide %p\n", this); ShowWindow(hwnd, SW_HIDE); } }
/* Copy `count` elements from `p` into the mapped buffer starting at
 * `offset`, then schedule that range for flushing.  Does nothing while the
 * buffer is unmapped. */
void accessor<BufferT>::set(uint32_t offset, uint32_t count, const type* p)
{
    if (is_mapped()) {
        buffer_ptr_->mapped_copy(p, offset, count);
        enqueue_flush(range(offset, count));
    }
}
/* Append a comma-separated "'table'.'prop' AS 'prop'" projection list for
 * every mapped property of @klass to @str, followed by a trailing space. */
static void
add_fields (GString          *str,
            GomResourceClass *klass)
{
   GParamSpec **pspecs;
   guint n_pspecs;
   guint i;
   gboolean first = TRUE;

   pspecs = g_object_class_list_properties(G_OBJECT_CLASS(klass), &n_pspecs);
   for (i = 0; i < n_pspecs; i++) {
      if (!is_mapped(pspecs[i])) {
         continue;
      }
      if (!first) {
         g_string_append(str, ", ");
      }
      /* Resolve the class that actually declared this property so the column
       * is qualified with the correct table name. */
      klass = g_type_class_peek(pspecs[i]->owner_type);
      g_string_append_printf(str, "'%s'.'%s' AS '%s'",
                             klass->table,
                             pspecs[i]->name,
                             pspecs[i]->name);
      first = FALSE;
   }
   g_free(pspecs);
   g_string_append(str, " ");
}
/* Copy the entire vector `v` into the mapped buffer starting at `offset` and
 * schedule the written range for flushing.  Does nothing while unmapped. */
void accessor<BufferT>::set(uint32_t offset, const std::vector<type>& v)
{
    if (is_mapped()) {
        buffer_ptr_->mapped_copy(v, offset, v.size());
        enqueue_flush(range(offset, v.size()));
    }
}
bool SgAsmElfSection::reallocate() { bool reallocated = false; SgAsmElfSectionTableEntry *sechdr = get_section_entry(); SgAsmElfSegmentTableEntry *seghdr = get_segment_entry(); /* Change section size if this section was defined in the ELF Section Table */ if (sechdr!=NULL) { rose_addr_t need = calculate_sizes(NULL, NULL, NULL, NULL); if (need < get_size()) { if (is_mapped()) { ROSE_ASSERT(get_mapped_size()==get_size()); set_mapped_size(need); } set_size(need); reallocated = true; } else if (need > get_size()) { get_file()->shift_extend(this, 0, need-get_size(), SgAsmGenericFile::ADDRSP_ALL, SgAsmGenericFile::ELASTIC_HOLE); reallocated = true; } } /* Update entry in the ELF Section Table and/or ELF Segment Table */ if (sechdr) sechdr->update_from_section(this); if (seghdr) seghdr->update_from_section(this); return reallocated; }
/* Write a single element `i` at `offset` in the mapped buffer and schedule a
 * one-element flush.  Does nothing while unmapped. */
void accessor<BufferT>::set(uint32_t offset, const type& i)
{
    if (is_mapped()) {
        (*buffer_ptr_)[offset] = i;
        enqueue_flush(range(offset, 1));
    }
}
/* Print some debugging info */ void SgAsmGenericSection::dump(FILE *f, const char *prefix, ssize_t idx) const { char p[4096]; if (idx>=0) { sprintf(p, "%sSection[%zd].", prefix, idx); } else { sprintf(p, "%sSection.", prefix); } const int w = std::max(1, DUMP_FIELD_WIDTH-(int)strlen(p)); fprintf(f, "%s%-*s = \"%s\"", p, w, "name", p_name->get_string(true).c_str()); if (!p_short_name.empty()) fprintf(f, " (%s)", p_short_name.c_str()); fprintf(f, "\n"); fprintf(f, "%s%-*s = %d\n", p, w, "id", p_id); fprintf(f, "%s%-*s = 0x%08"PRIx64" (%"PRIu64") bytes into file\n", p, w, "offset", p_offset, p_offset); fprintf(f, "%s%-*s = 0x%08"PRIx64" (%"PRIu64") bytes\n", p, w, "size", get_size(), get_size()); if (0==get_file_alignment()) { fprintf(f, "%s%-*s = not specified\n", p, w, "file_alignment"); } else { fprintf(f, "%s%-*s = 0x%08"PRIx64" (%"PRIu64") %s\n", p, w, "file_alignment", get_file_alignment(), get_file_alignment(), 0==get_offset()%get_file_alignment()?"satisfied":"NOT SATISFIED"); } fprintf(f, "%s%-*s = %s\n", p, w, "synthesized", p_synthesized?"yes":"no"); if (p_header) { fprintf(f, "%s%-*s = \"%s\"\n", p, w, "header", p_header->get_name()->get_string(true).c_str()); } else { fprintf(f, "%s%-*s = not associated\n", p, w, "header"); } std::string purpose = stringifySgAsmGenericSectionSectionPurpose(p_purpose); fprintf(f, "%s%-*s = %s\n", p, w, "purpose", purpose.c_str()); if (is_mapped()) { fprintf(f, "%s%-*s = rva=0x%08"PRIx64", size=%"PRIu64" bytes\n", p, w, "mapped", p_mapped_preferred_rva, p_mapped_size); if (0==get_mapped_alignment()) { fprintf(f, "%s%-*s = not specified\n", p, w, "mapped_alignment"); } else { fprintf(f, "%s%-*s = 0x%08"PRIx64" (%"PRIu64") %s\n", p, w, "mapped_alignment", get_mapped_alignment(), get_mapped_alignment(), 0==get_mapped_preferred_rva()%get_mapped_alignment()?"satisfied":"NOT SATISFIED"); } fprintf(f, "%s%-*s = %c%c%c\n", p, w, "permissions", get_mapped_rperm()?'r':'-', get_mapped_wperm()?'w':'-', get_mapped_xperm()?'x':'-'); } 
else { fprintf(f, "%s%-*s = <not mapped>\n", p, w, "mapped"); } fprintf(f, "%s%-*s = %s\n", p, w, "contains_code", get_contains_code()?"true":"false"); fprintf(f, "%s%-*s = 0x%08"PRIx64" (%"PRIu64") \n", p, w, "mapped_actual_va", p_mapped_actual_va, p_mapped_actual_va); // DQ (8/31/2008): Output the contents if this not derived from (there is likely a // better implementation if the hexdump function was a virtual member function). if (variantT() == V_SgAsmGenericSection) { hexdump(f, 0, std::string(p)+"data at ", p_data); } }
/** Pre-unparsing adjustments */ bool SgAsmElfNoteSection::reallocate() { bool reallocated = SgAsmElfSection::reallocate(); /* How much space is needed by the notes? */ rose_addr_t need = 0; for (size_t i=0; i<p_entries->get_entries().size(); i++) { SgAsmElfNoteEntry *ent = p_entries->get_entries()[i]; need += ent->calculate_size(); } /* Adjust the section/segment size */ if (need < get_size()) { if (is_mapped()) { ROSE_ASSERT(get_mapped_size()==get_size()); set_mapped_size(need); } set_size(need); reallocated = true; } else if (need > get_size()) { get_file()->shift_extend(this, 0, need-get_size(), SgAsmGenericFile::ADDRSP_ALL, SgAsmGenericFile::ELASTIC_HOLE); reallocated = true; } return reallocated; }
/* Write a single element at `offset` within block `blk`.  The index is
 * translated to an absolute buffer position when the whole buffer is mapped
 * (map_start_ == 0); otherwise `offset` is used as-is. */
void accessor<BufferT>::set(const range& blk, uint32_t offset, const type& i)
{
    if (!blk.is_valid() || offset >= blk.count || !is_mapped())
        return;
    const uint32_t idx = (map_start_ == 0) ? blk.start + offset : offset;
    (*buffer_ptr_)[idx] = i;
    enqueue_flush(range(idx, 1));
}
/* Copy `count` elements from `p` into block `blk` starting at `offset`,
 * translating to an absolute index when the whole buffer is mapped
 * (map_start_ == 0), then schedule the written range for flushing. */
void accessor<BufferT>::set(const range& blk, uint32_t offset, uint32_t count, const type* p)
{
    /* BUG FIX: the original check `offset + count > blk.count` can wrap in
     * 32-bit unsigned arithmetic, letting an out-of-range request slip past.
     * Test the two halves separately so no overflow is possible. */
    if(!blk.is_valid() || offset > blk.count || count > blk.count - offset || !is_mapped())
        return;
    const uint32_t idx = map_start_ == 0 ? blk.start + offset : offset;
    buffer_ptr_->mapped_copy(p, idx, count);
    enqueue_flush(range(idx, count));
}
/* Copy the whole vector `v` into block `blk` starting at `offset`,
 * translating to an absolute index when the whole buffer is mapped
 * (map_start_ == 0), then schedule the written range for flushing. */
void accessor<BufferT>::set(const range& blk, uint32_t offset, const std::vector<type>& v)
{
    if (!blk.is_valid() || offset + v.size() > blk.count || !is_mapped())
        return;
    const uint32_t count = uint32_t(v.size());
    const uint32_t idx = (map_start_ == 0) ? blk.start + offset : offset;
    buffer_ptr_->mapped_copy(v, idx, count);
    enqueue_flush(range(idx, count));
}
/** Returns (non-relative) virtual address if mapped, zero otherwise. */
rose_addr_t
SgAsmGenericSection::get_mapped_preferred_va() const
{
    ROSE_ASSERT(this != NULL);
    if (!is_mapped())
        return 0;
    return get_base_va() + get_mapped_preferred_rva();
}
/** Returns the file offset associated with the virtual address of a mapped section.
 *
 *  The address must fall at or after this section's mapped region.
 *
 *  NOTE: The MemoryMap class is a better interface to this same information. [RPM 2009-09-09] */
rose_addr_t
SgAsmGenericSection::get_va_offset(rose_addr_t va) const
{
    ROSE_ASSERT(is_mapped());
    ROSE_ASSERT(va >= get_base_va());
    const rose_addr_t rel = va - get_base_va();
    ROSE_ASSERT(rel >= get_mapped_preferred_rva());
    return get_offset() + rel - get_mapped_preferred_rva();
}
/* Scheme predicate: #t when the client wrapped by CLIENT-SMOB is mapped,
 * #f otherwise. */
static SCM scm_is_mapped(SCM client_smob)
{
    client_t *client = (client_t *)SCM_SMOB_DATA(client_smob);
    return is_mapped(client) ? SCM_BOOL_T : SCM_BOOL_F;
}
void PrintableWindow::hide() { if (bound()) { if (is_mapped()) { xplace(xleft(), xtop()); //printf("hide %lx %d %d\n", (long)this, xleft_, xtop_); WindowRep& w = *((Window*)this)->rep(); XWithdrawWindow(display()->rep()->display_, w.xwindow_, display()->rep()->screen_); } } }
/* Release the current mapping, if any, and reset _mem/_len.  No-op on WIN32
 * builds.  Returns *this for chaining. */
ROMappedFile &ROMappedFile::unmap_file() {
#ifndef WIN32
    if (is_mapped()) {
        const int rc = munmap(_mem, _len);
        assert(rc == 0);
        if (rc == 0) {
            _mem = 0;
            _len = 0;
        }
    }
#endif
    return *this;
}
/* Return a Scheme list of smobs for all currently mapped clients, in the
 * same order they appear in client_list. */
static SCM scm_visible_clients(void)
{
    SCM clients = SCM_EOL;
    SCM smob;
    client_t *client;

    /* PERF FIX: the original appended each element with scm_append(), which
     * re-copies the accumulated list every iteration (O(n^2) overall).
     * Prepend in O(1) and reverse once at the end; the result is identical. */
    for (client = client_list; client; client = client->next) {
        if (is_mapped(client)) {
            SCM_NEWSMOB(smob, client_tag, client);
            clients = scm_cons(smob, clients);
        }
    }
    return scm_reverse(clients);
}
/*
#<pydoc>
def refresh_debugger_memory():
    """
    Refreshes the debugger memory
    @return: Nothing
    """
    pass
#</pydoc>
*/
/* Python binding: drop all cached debugger-memory state so subsequent
 * queries re-read it from the debugger.  Always returns None. */
static PyObject *refresh_debugger_memory()
{
  invalidate_dbgmem_config();
  /* NOTE(review): (BADADDR, 0) presumably means "the entire address
   * space" -- confirm against the SDK documentation. */
  invalidate_dbgmem_contents(BADADDR, 0);

  // Ask the debugger to populate debug names
  if ( dbg != NULL )
    dbg->suspended(true);

  // Invalidate the cache
  is_mapped(0);

  /* Sanity check: this binding must be entered with the GIL held. */
  PYW_GIL_CHECK_LOCKED_SCOPE();
  Py_RETURN_NONE;
}
/* Python binding: vm_mngr.is_mapped(address, size).
 *
 * Parses two Python integers (address, size) and returns the integer result
 * of is_mapped() on this VmMngr's memory manager -- presumably nonzero when
 * the whole [address, address+size) range is mapped; confirm against the
 * vm_mngr implementation.  Raises TypeError on unparsable arguments. */
PyObject* vm_is_mapped(VmMngr* self, PyObject* args)
{
    PyObject *ad;     /* address argument (arbitrary Python integer object) */
    PyObject *size;   /* size argument (arbitrary Python integer object) */
    uint64_t b_ad;
    size_t b_size;
    int ret;

    if (!PyArg_ParseTuple(args, "OO", &ad, &size))
        RAISE(PyExc_TypeError,"Cannot parse arguments");
    /* Project helper macros: convert the Python objects to native integers;
     * they embed their own error handling (return on failure). */
    PyGetInt_uint64_t(ad, b_ad);
    PyGetInt_size_t(size, b_size);

    ret = is_mapped(&self->vm_mngr, b_ad, b_size);
    return PyLong_FromUnsignedLongLong((uint64_t)ret);
}
/* Decide whether @pspec should be part of the INSERT statement generated for
 * @resource_type. */
static gboolean
do_prop_on_insert (GParamSpec       *pspec,
                   GomResourceClass *klass,
                   GType             resource_type)
{
#define IS_TOPLEVEL(t) (g_type_parent((t)) == GOM_TYPE_RESOURCE)
#define IS_PRIMARY_KEY(p) (!g_strcmp0((p)->name, klass->primary_key))
#define BELONGS_TO_TYPE(p, t) ((p)->owner_type == (t))
   /* Only mapped properties are ever inserted. */
   if (!is_mapped(pspec))
      return FALSE;
   /* Skip the primary key of a toplevel resource. */
   if (IS_TOPLEVEL(resource_type) && IS_PRIMARY_KEY(pspec))
      return FALSE;
   /* A non-key property must be declared by the resource type itself. */
   if (!IS_PRIMARY_KEY(pspec) && !BELONGS_TO_TYPE(pspec, resource_type))
      return FALSE;
   return TRUE;
#undef IS_TOPLEVEL
#undef IS_PRIMARY_KEY
#undef BELONGS_TO_TYPE
}
/* Give input focus to the client wrapped by CLIENT-SMOB.  When the argument
 * is unspecified, fall back to the first client in the list.  Unmapped or
 * missing clients are ignored.  Always returns SCM_UNSPECIFIED. */
static SCM scm_focus_client(SCM client_smob)
{
    client_t *client;

    if (scm_is_eq(client_smob, SCM_UNSPECIFIED)) {
        /* Use first client in list if we aren't given a good client_smob */
        client = client_list;
    } else {
        client = (client_t *)SCM_SMOB_DATA(client_smob);
    }

    if (client && is_mapped(client))
        set_focus_client(client);

    return SCM_UNSPECIFIED;
}
/** Increase file offset and mapping address to satisfy alignment constraints. This is typically done when initializing a new * section. The constructor places the new section at the end of the file before it knows what the alignment constraints will * be. The user should then set the alignment constraints (see set_file_alignment() and set_mapped_alignment()) and call this * method. This method must be called before any additional sections are appended to the file. * * The file offset and memory mapping address are adjusted independently. * * On the other hand, if additional sections are in the way, they must first be moved out of the way with the * SgAsmGenericFile::shift_extend() method. * * Returns true if the file offset and/or mapping address changed as a result of this call. */ bool SgAsmGenericSection::align() { bool changed = false; if (get_file_alignment()>0) { rose_addr_t old_offset = get_offset(); rose_addr_t new_offset = ALIGN_UP(old_offset, get_file_alignment()); set_offset(new_offset); changed = changed ? true : (old_offset!=new_offset); } if (is_mapped() && get_mapped_alignment()>0) { rose_addr_t old_rva = get_mapped_preferred_rva(); rose_addr_t new_rva = ALIGN_UP(old_rva, get_mapped_alignment()); set_mapped_preferred_rva(new_rva); changed = changed ? true : (old_rva!=new_rva); } return changed; }
void reserve() { BOOST_ASSERT( ptr_ == 0 ); if( !count_ ) { return; } if( is_mapped() ) { boost::filesystem::path tmp( boost::filesystem::temp_directory_path() ); filename_ = tmp.string() + "/mm_XXXXXX"; char name[256]; strcpy( name, filename_.c_str() ); fd_ = mkstemp( name ); filename_ = std::string( name ); if( fd_ == -1 ) { std::cerr << "Unable to create map file: " << filename_ << std::endl; return; } off_t file_size( bytes() ); ftruncate( fd_, file_size ); void* mem = mmap( 0, file_size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE, fd_, 0 ); BOOST_ASSERT( mem != MAP_FAILED ); ptr_ = static_cast<num_type*>( mem ); } else { ptr_ = new num_type[count_]; } }
/* Wrap one BAM alignment record: copy out the query name, flags, and decoded
 * sequence, plus reference coordinates when the record (and its mate) is
 * mapped.  Keeps a pointer to the raw record in _rec_p. */
Mapping(const bam_hdr_t * hdr_p, bam1_t * rec_p)
    : _rec_p(rec_p)
{
    _query_name = bam_get_qname(rec_p);
    _flag = rec_p->core.flag;
    // Decode the 4-bit packed bases into characters via htslib's
    // seq_nt16_str lookup table.
    for (int i = 0; i < rec_p->core.l_qseq; ++i) {
        _seq += seq_nt16_str[bam_seqi(bam_get_seq(rec_p), i)];
    }
    if (is_mapped()) {
        // assumes is_mapped() implies core.tid is a valid index into
        // hdr_p->target_name -- TODO confirm
        _chr_name = hdr_p->target_name[rec_p->core.tid];
        _rf_start = rec_p->core.pos;
        _cigar = Cigar(bam_get_cigar(rec_p), rec_p->core.n_cigar);
        _rf_len = _cigar.rf_len();   // reference span implied by the CIGAR
    }
    if (is_paired() and mp_is_mapped()) {
        // Mate coordinates (mtid/mpos) are only meaningful for paired,
        // mate-mapped records.
        _mp_chr_name = hdr_p->target_name[rec_p->core.mtid];
        _mp_rf_start = rec_p->core.mpos;
    }
}
/* Free the storage acquired by reserve(): unmap and delete the temporary
 * file when mapped, otherwise delete the heap array.  Safe to call when
 * nothing is allocated. */
void release()
{
    if( ptr_ == 0 ) {
        return;
    }
    if( is_mapped() ) {
        munmap( ptr_, bytes() );
        close( fd_ );
        boost::filesystem::remove( boost::filesystem::path( filename_.c_str() ) );
    }
    else {
        delete[] ptr_;
    }
    ptr_ = 0;
}
/* Change size of PE header based on word size */ bool SgAsmPEFileHeader::reallocate() { bool reallocated = SgAsmGenericHeader::reallocate(); /* Resize if necessary */ rose_addr_t need = sizeof(PEFileHeader_disk); if (4==get_word_size()) { need += sizeof(PE32OptHeader_disk); } else if (8==get_word_size()) { need += sizeof(PE64OptHeader_disk); } else { throw FormatError("unsupported PE word size"); } need += p_rvasize_pairs->get_pairs().size() * sizeof(SgAsmPERVASizePair::RVASizePair_disk); if (need<get_size()) { if (is_mapped()) { ROSE_ASSERT(get_mapped_size()==get_size()); set_mapped_size(need); } set_size(need); reallocated = true; } else if (need>get_size()) { get_file()->shift_extend(this, 0, need-get_size(), SgAsmGenericFile::ADDRSP_ALL, SgAsmGenericFile::ELASTIC_HOLE); reallocated = true; } /* Make sure the RVA/Size pairs at the end of the header are consistent with the sections to which they point. Reallocate() * has already been called recursively for the sections. */ update_rvasize_pairs(); /* Make sure header is consistent with sections. Reallocate() has already been called recursively for the sections. * Count the number of sections in the table and update the header's e_nsections member. */ if (p_section_table) { ROSE_ASSERT(p_section_table->get_header()==this); SgAsmGenericSectionList *all = get_sections(); p_e_nsections = 0; for (size_t i=0; i<all->get_sections().size(); i++) { SgAsmPESection *pesec = dynamic_cast<SgAsmPESection*>(all->get_sections()[i]); if (pesec && pesec->get_section_entry()!=NULL) p_e_nsections++; } rose_addr_t header_size = ALIGN_UP(p_section_table->get_offset() + p_section_table->get_size(), p_e_file_align>0 ? p_e_file_align : 1); #if 1 /* The PE Specification regarding e_header_size (known as "SizeOfHeader" on page 14 of "Microsoft Portable Executable * and Common Object File Format Specification: Revision 8.1 February 15, 2008" is not always followed. 
We recompute * it here as being the minimum RVA from all the sections defined in the PE Section Table, but not smaller * than the value according to the specification. This alternate value is kept if it's already in the parse tree, * otherwise we use the correct value. (RPM 2008-10-21) */ rose_addr_t min_offset = 0; for (size_t i=0, nfound=0; i<all->get_sections().size(); i++) { SgAsmPESection *pesec = dynamic_cast<SgAsmPESection*>(all->get_sections()[i]); if (pesec && pesec->get_section_entry()!=NULL) { if (0==nfound++) { min_offset = pesec->get_offset(); } else { min_offset = std::min(min_offset, pesec->get_offset() ); } } } rose_addr_t header_size2 = std::max(header_size, min_offset); if (p_e_header_size==header_size2) header_size = header_size2; /* If the original header size was zero then don't change that--leave it at zero. Some tiny executables have a zero * value here and as a result, since this is near the end of the NT Optional Header, they can truncate the file and * the loader will fill the optional header with zeros when reading. (RPM 2008-11-11) */ if (p_e_header_size==0) header_size = 0; #endif p_e_header_size = header_size; } /* The size of the optional header. If there's a section table then we use its offset to calculate the optional header * size in order to be compatible with the PE loader. Otherwise use the actual optional header size. 
*/ if (p_section_table) { ROSE_ASSERT(p_section_table->get_offset() >= get_offset() + sizeof(PEFileHeader_disk)); p_e_nt_hdr_size = p_section_table->get_offset() - (get_offset() + sizeof(PEFileHeader_disk)); } else if (4==get_word_size()) { p_e_nt_hdr_size = sizeof(PE32OptHeader_disk); } else if (8==get_word_size()) { p_e_nt_hdr_size = sizeof(PE64OptHeader_disk); } else { throw FormatError("invalid PE word size"); } /* Update COFF symbol table related data members in the file header */ if (get_coff_symtab()) { ROSE_ASSERT(get_coff_symtab()->get_header()==this); set_e_coff_symtab(get_coff_symtab()->get_offset()); set_e_coff_nsyms(get_coff_symtab()->get_nslots()); } /* Update some additional header fields */ set_e_num_rvasize_pairs(get_rvasize_pairs()->get_pairs().size()); set_e_opt_magic(4==get_word_size() ? 0x010b : 0x020b); set_e_lmajor((get_exec_format()->get_version() >> 16) & 0xffff); set_e_lminor(get_exec_format()->get_version() & 0xffff); /* Adjust the COFF Header's e_nt_hdr_size to accommodate the NT Optional Header in such a way that EXEs from tinype.com * don't change (i.e., don't increase e_nt_hdr_size if the bytes beyond it are zero anyway, and if they aren't then adjust * it as little as possible. The RVA/Size pairs are considered to be part of the NT Optional Header. 
*/ size_t oh_size = p_rvasize_pairs->get_pairs().size() * sizeof(SgAsmPERVASizePair::RVASizePair_disk); size_t rvasize_offset; /*offset with respect to "oh" buffer allocated below*/ if (4==get_word_size()) { oh_size += sizeof(PE32OptHeader_disk); } else if (8==get_word_size()) { oh_size += sizeof(PE64OptHeader_disk); } else { throw FormatError("unsupported PE word size"); } unsigned char *oh = new unsigned char[oh_size]; if (4==get_word_size()) { encode((PE32OptHeader_disk*)oh); rvasize_offset = sizeof(PE32OptHeader_disk); } else if (8==get_word_size()) { encode((PE64OptHeader_disk*)oh); rvasize_offset = sizeof(PE64OptHeader_disk); } else { delete[] oh; throw FormatError("unsupported PE word size"); } while (oh_size>p_e_nt_hdr_size) { if (0!=oh[oh_size-1]) break; --oh_size; } set_e_nt_hdr_size(oh_size); return reallocated; }
/* Locate the virtual memory area (VMA) containing ADDRESS by parsing this
   process's map listing at /proc/curproc/map.  On success fills *VMA and
   returns 0; returns -1 on failure (falling back to mincore_get_vma() when
   HAVE_MINCORE and mincore appears trustworthy).  */
int
sigsegv_get_vma (unsigned long address, struct vma_struct *vma)
{
  FILE *fp;
  int c;
  /* The stack appears as multiple adjacents segments, therefore we merge
     adjacent segments.  */
  unsigned long next_start, next_end, curr_start, curr_end;
#if STACK_DIRECTION < 0
  unsigned long prev_end;
#endif

  /* Open the current process' maps file. It describes one VMA per line.  */
  fp = fopen ("/proc/curproc/map", "r");
  if (!fp)
    goto failed;

#if STACK_DIRECTION < 0
  prev_end = 0;
#endif
  for (curr_start = curr_end = 0; ;)
    {
      /* Each line begins with "0xSTART 0xEND"; consume and discard the rest
         of the line.  */
      if (fscanf (fp, "0x%lx 0x%lx", &next_start, &next_end) != 2)
        break;
      while (c = getc (fp), c != EOF && c != '\n')
        continue;
      if (next_start == curr_end)
        {
          /* Merge adjacent segments.  */
          curr_end = next_end;
        }
      else
        {
          if (curr_start < curr_end && address >= curr_start && address <= curr_end-1)
            goto found;
#if STACK_DIRECTION < 0
          prev_end = curr_end;
#endif
          curr_start = next_start;
          curr_end = next_end;
        }
    }
  /* NOTE: the `found:` label below is attached to the if's dependent block,
     so `goto found` from inside the loop enters the block directly with
     curr_start/curr_end already describing the matching (merged) segment.  */
  if (address >= curr_start && address <= curr_end-1)
    found:
    {
      vma->start = curr_start;
      vma->end = curr_end;
#if STACK_DIRECTION < 0
      vma->prev_end = prev_end;
#else
      /* Try to read the start of the following segment, if any.  */
      if (fscanf (fp, "0x%lx 0x%lx", &vma->next_start, &next_end) != 2)
        vma->next_start = 0;
#endif
      fclose (fp);
      vma->is_near_this = simple_is_near_this;
      return 0;
    }
  fclose (fp);
 failed:
#if HAVE_MINCORE
  /* FreeBSD 6.[01] doesn't allow to distinguish unmapped pages from mapped
     but swapped-out pages. See whether it's fixed.  */
  if (!is_mapped (0))
    /* OK, mincore() appears to work as expected.  */
    return mincore_get_vma (address, vma);
#endif
  return -1;
}