Example #1
/**
 * Invalidate a possibly cached page.
 *
 * This is used when we know a new and fresh copy of the page is held on
 * disk.  Further access to the page will require reloading it from disk.
 */
void
lru_invalidate(DBM *db, long bno)
{
	struct lru_cache *cache = db->cache;
	void *value;

	if (
		g_hash_table_lookup_extended(cache->pagnum,
			ulong_to_pointer(bno), NULL, &value)
	) {
		long idx = pointer_to_int(value);

		g_assert(idx >= 0 && idx < cache->pages);
		g_assert(cache->numpag[idx] == bno);

		/*
		 * One should never be invalidating a dirty page, unless something
		 * went wrong during a split and we're trying to undo things.
		 * Since the operation will cause a data loss, warn.
		 */

		if (cache->dirty[idx]) {
			g_warning("sdbm: \"%s\": %s() invalidating dirty page #%ld",
				db->name, stacktrace_caller_name(1), bno);
		}

		hash_list_remove(cache->used, value);
		g_hash_table_remove(cache->pagnum, ulong_to_pointer(bno));
		cache->numpag[idx] = -1;
		cache->dirty[idx] = FALSE;
		slist_append(cache->available, value);	/* Make index available */
	}
}
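
Every example here funnels small integers through GLib's void *-based
containers via casting helpers that are not shown in the excerpts. A
minimal sketch of what they presumably look like (the project's actual
definitions may differ, e.g. by asserting against truncation):

static inline void *
ulong_to_pointer(unsigned long value)
{
	return (void *) value;	/* Assumes unsigned long fits in a pointer */
}

static inline unsigned long
pointer_to_ulong(const void *p)
{
	return (unsigned long) p;
}

static inline void *
int_to_pointer(int value)
{
	return ulong_to_pointer((unsigned long) value);
}

static inline int
pointer_to_int(const void *p)
{
	return (int) pointer_to_ulong(p);
}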
Example #2
/**
 * Compute the mapping offset for the program / library.
 *
 * The .text section could say 0x500000 but the actual virtual memory
 * address where the library was mapped could be 0x600000.  Hence looking
 * for addresses at 0x6xxxxx would not match any of the symbol addresses
 * held in the file.
 *
 * The base given here should be the actual VM address where the kernel
 * loaded the first section.
 *
 * The computed offset will then be automatically used to adjust the given
 * addresses being looked at, remapping them to the proper range for lookup
 * purposes.
 *
 * @param bc		the BFD context (NULL allowed for convenience)
 * @param base		the VM mapping address of the text segment
 */
void
bfd_util_compute_offset(bfd_ctx_t *bc, ulong base)
{
	asection *sec;
	bfd *b;

	if (NULL == bc)
		return;			/* Convenience */

	bfd_ctx_check(bc);

	if (bc->offseted || NULL == bc->handle)
		return;

	mutex_lock_fast(&bc->lock);

	if (bc->offseted) {
		mutex_unlock_fast(&bc->lock);
		return;
	}

	b = bc->handle;
	if (NULL == b) {
		mutex_unlock_fast(&bc->lock);
		return;
	}

	/*
	 * Take the first section of the file and look where its page would start.
	 * Then compare that to the advertised mapping base for the object to
	 * know the offset we have to apply for proper symbol resolution.
	 */

	sec = b->sections;

	/*
	 * Notes for later: sections are linked through sec->next.
	 *
	 * It is possible to gather the section name via:
	 *		const char *name = bfd_section_name(b, sec);
	 */

	if (sec != NULL) {
		bfd_vma addr = bfd_section_vma(b, sec);

		bc->offset = ptr_diff(vmm_page_start(ulong_to_pointer(addr)),
			vmm_page_start(ulong_to_pointer(base)));
	}

	bc->offseted = TRUE;
	mutex_unlock_fast(&bc->lock);
}
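
Once bc->offset has been computed, lookups presumably remap runtime
addresses back into the file's symbol range along these lines
(hypothetical sketch; bfd_util_remap() is made up for illustration, and
the offset is signed since the mapping base may lie above the advertised
section address):

static unsigned long
bfd_util_remap(const bfd_ctx_t *bc, const void *addr)
{
	return pointer_to_ulong(addr) + bc->offset;	/* Offset may be negative */
}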
Example #3
/*
 * Set the parent link of `node', preserving the color packed into the
 * low bits of the tagged parent pointer.
 */
static inline void
set_parent(rbnode_t *node, rbnode_t *parent)
{
	node->parent = ulong_to_pointer(
		pointer_to_ulong(parent) |
		(pointer_to_ulong(node->parent) & RB_COLOR_MASK)
	);
}
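
set_parent() keeps the node color packed into the low bits of the tagged
parent pointer. The supporting declarations are not part of this excerpt;
a plausible shape, assuming nodes are aligned strictly enough to leave
the low bit free:

enum rbcolor {
	RB_BLACK = 0,
	RB_RED = 1
};

#define RB_COLOR_MASK	0x1UL	/* Low pointer bit carries the node color */

typedef struct rbnode {
	struct rbnode *parent;	/* Tagged: low bit holds the node's color */
	struct rbnode *left;
	struct rbnode *right;
} rbnode_t;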
Example #4
/*
 * Set the color of `node' without disturbing its parent link.
 */
static inline void
set_color(rbnode_t *node, enum rbcolor color)
{
	g_assert(RB_BLACK == color || RB_RED == color);

	node->parent = ulong_to_pointer(
		(pointer_to_ulong(node->parent) & ~RB_COLOR_MASK) | color);
}
Example #5
/**
 * Load text symbols from the file into the supplied table.
 *
 * @param bc		the BFD context pointing to the file
 * @param st		the symbol table where symbols should be added
 */
static void
bfd_util_load_text(bfd_ctx_t *bc, symbols_t *st)
{
	long i;
	asymbol *empty;
	void *p;

	bfd_ctx_check(bc);
	g_assert(st != NULL);

	if (0 == bc->count)
		return;

	mutex_lock_fast(&bc->lock);

	g_assert(bc->symbols != NULL);

	empty = bfd_make_empty_symbol(bc->handle);
	symbols_lock(st);

	for (
		i = 0, p = bc->symbols;
		i < bc->count;
		i++, p = ptr_add_offset(p, bc->symsize)
	) {
		asymbol *sym;
		symbol_info syminfo;

		sym = bfd_minisymbol_to_symbol(bc->handle, bc->dynamic, p, empty);
		bfd_get_symbol_info(bc->handle, sym, &syminfo);

		if ('T' == syminfo.type || 't' == syminfo.type) {
			const char *name = bfd_asymbol_name(sym);

			if (name != NULL && name[0] != '.') {
				void *addr = ulong_to_pointer(syminfo.value);
				symbols_append(st, addr, name);
			}
		}
	}

	symbols_unlock(st);
	mutex_unlock_fast(&bc->lock);
}
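
The minisymbol array walked above (bc->symbols, bc->count, bc->symsize)
is not filled in this excerpt; it would presumably come from the stock
BFD API, roughly as follows (bfd_util_read_symbols() is a hypothetical
name):

static void
bfd_util_read_symbols(bfd_ctx_t *bc)
{
	void *minisyms;
	unsigned int size;
	long count;

	/*
	 * bfd_read_minisymbols() returns the amount of symbols read and the
	 * per-entry size of the opaque minisymbol table it allocates.
	 */

	count = bfd_read_minisymbols(bc->handle, bc->dynamic, &minisyms, &size);

	if (count > 0) {
		bc->symbols = minisyms;
		bc->count = count;
		bc->symsize = size;
	}
}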
Example #6
/**
 * Get the address in the cache of a given page number.
 *
 * @param db		the database
 * @param num		the page number in the DB
 *
 * @return page address if found, NULL if not cached.
 */
char *
lru_cached_page(DBM *db, long num)
{
	struct lru_cache *cache = db->cache;
	void *value;

	g_assert(num >= 0);

	if (
		cache != NULL &&
		g_hash_table_lookup_extended(cache->pagnum,
			ulong_to_pointer(num), NULL, &value)
	) {
		long idx = pointer_to_int(value);

		g_assert(idx >= 0 && idx < cache->pages);
		g_assert(cache->numpag[idx] == num);

		return cache->arena + OFF_PAG(idx);
	}

	return NULL;
}
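
A caller can rely on this to skip a disk read when the page is already
held in memory; a hypothetical sketch, with the disk fallback elided:

	char buf[DBM_PBLKSIZ];
	char *cached = lru_cached_page(db, num);

	if (cached != NULL) {
		memcpy(buf, cached, DBM_PBLKSIZ);	/* Serve page from the cache */
	} else {
		/* Not cached: read page `num' from disk instead (not shown) */
	}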
Example #7
/**
 * Get a suitable buffer in the cache to read a page and set db->pagbuf
 * accordingly.
 *
 * The `loaded' parameter, if non-NULL, is set to TRUE if the page was
 * already held in the cache, FALSE when it needs to be loaded.
 *
 * @return TRUE if OK, FALSE if we could not allocate a suitable buffer, leaving
 * the old db->pagbuf intact.
 */
gboolean
readbuf(DBM *db, long num, gboolean *loaded)
{
	struct lru_cache *cache = db->cache;
	void *value;
	long idx;
	gboolean good_page;

	g_assert(num >= 0);

	if (
		g_hash_table_lookup_extended(cache->pagnum,
			ulong_to_pointer(num), NULL, &value)
	) {
		hash_list_moveto_head(cache->used, value);
		idx = pointer_to_int(value);

		g_assert(idx >= 0 && idx < cache->pages);
		g_assert(cache->numpag[idx] == num);

		good_page = TRUE;
		cache->rhits++;
	} else {
		idx = getidx(db, num);
		if (-1 == idx)
			return FALSE;	/* Do not update db->pagbuf */

		good_page = FALSE;
		cache->rmisses++;
	}

	db->pagbuf = cache->arena + OFF_PAG(idx);
	if (loaded != NULL)
		*loaded = good_page;

	return TRUE;
}
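
A typical read path built on top of readbuf() might then look like this
(hypothetical sketch, with the actual disk read elided):

	gboolean loaded;

	if (!readbuf(db, num, &loaded))
		return FALSE;		/* No buffer available, db->pagbuf untouched */

	if (!loaded) {
		/* db->pagbuf points into the cache arena: fill it from disk */
	}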
Example #8
/*
 * Fetch the parent link of `node', stripping the color bits from the
 * tagged pointer so that it can be dereferenced.
 */
static inline rbnode_t *
get_parent(const rbnode_t *node)
{
	return ulong_to_pointer(pointer_to_ulong(node->parent) & ~RB_COLOR_MASK);
}
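
The matching color accessor is not part of this excerpt; given the
assumed RB_COLOR_MASK above, it would presumably be:

static inline enum rbcolor
get_color(const rbnode_t *node)
{
	return (enum rbcolor) (pointer_to_ulong(node->parent) & RB_COLOR_MASK);
}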
Example #9
/*
 * Reset the tagged parent pointer of `node' to the bare parent link,
 * clearing the color bits.
 */
static inline void
invalidate(rbnode_t *node)
{
	node->parent = ulong_to_pointer(
		(pointer_to_ulong(node->parent) & ~RB_COLOR_MASK));
}
Example #10
/**
 * Cache a new page held in memory if deferred writes are configured.
 * @return TRUE on success.
 */
gboolean
cachepag(DBM *db, char *pag, long num)
{
	struct lru_cache *cache = db->cache;
	void *value;

	g_assert(num >= 0);

	/*
	 * Coming from makroom() where we allocated a new page, starting at "pag".
	 *
	 * Normally the page should not be cached, but it is possible we iterated
	 * over the hash table and traversed the page on disk as a hole, and cached
	 * it during the process.  If present, it must be clean and should hold
	 * no data (or the bitmap forest in the .dir file is corrupted).
	 *
	 * Otherwise, we cache the new page and hold it there if we can defer
	 * writes, or flush it to disk immediately (without caching it).
	 */

	if (
		g_hash_table_lookup_extended(cache->pagnum,
			ulong_to_pointer(num), NULL, &value)
	) {
		long idx;
		unsigned short *ino;
		unsigned weird = 0;
		char *cpag;

		/*
		 * Do not move the page to the head of the cache list.
		 *
		 * This page should not have been cached (it was supposed to be a
		 * hole up to now) and its being cached now does not constitute usage.
		 */

		idx = pointer_to_int(value);
		g_assert(idx >= 0 && idx < cache->pages);
		g_assert(cache->numpag[idx] == num);

		/*
		 * Not a read hit since we're about to supersede the data
		 */

		cpag = cache->arena + OFF_PAG(idx);
		ino = (unsigned short *) cpag;

		if (ino[0] != 0) {
			weird++;
			g_warning("sdbm: \"%s\": new page #%ld was cached but not empty",
				db->name, num);
		}
		if (cache->dirty[idx]) {
			weird++;
			g_warning("sdbm: \"%s\": new page #%ld was cached and not clean",
				db->name, num);
		}
		if (weird > 0) {
			g_warning("sdbm: \"%s\": previous warning%s indicate possible "
				"corruption in the bitmap forest",
				db->name, 1 == weird ? "" : "s");
		}

		/*
		 * Supersede cached page with new page created by makroom().
		 */

		memmove(cpag, pag, DBM_PBLKSIZ);

		if (cache->write_deferred) {
			cache->dirty[idx] = TRUE;
		} else {
			cache->dirty[idx] = !flushpag(db, pag, num);
		}
		return TRUE;
	} else if (cache->write_deferred) {
		long idx;
		char *cpag;

		idx = getidx(db, num);
		if (-1 == idx)
			return FALSE;

		cpag = cache->arena + OFF_PAG(idx);
		memmove(cpag, pag, DBM_PBLKSIZ);
		cache->dirty[idx] = TRUE;
		return TRUE;
	} else {
		return flushpag(db, pag, num);
	}
}
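
The leading comment places the caller in makroom(); a hypothetical call
site, once the new page image has been built in pag:

	if (!cachepag(db, pag, num))
		return FALSE;	/* Could neither cache nor flush the new page */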
Example #11
/**
 * Get a new index in the cache, and update LRU data structures.
 *
 * @param db	the database
 * @param num	page number in the DB for which we want a cache index
 *
 * @return -1 on error, or the allocated cache index.
 */
static int
getidx(DBM *db, long num)
{
	struct lru_cache *cache = db->cache;
	long n;		/* Cache index */

	/*
	 * If we invalidated pages, reuse their indices.
	 * If we have not used all the pages yet, get the next one.
	 * Otherwise, use the least-recently requested page.
	 */

	if (slist_length(cache->available)) {
		void *v = slist_shift(cache->available);
		n = pointer_to_int(v);
		g_assert(n >= 0 && n < cache->pages);
		g_assert(!cache->dirty[n]);
		g_assert(-1 == cache->numpag[n]);
		hash_list_prepend(cache->used, int_to_pointer(n));
	} else if (cache->next < cache->pages) {
		n = cache->next++;
		cache->dirty[n] = FALSE;
		hash_list_prepend(cache->used, int_to_pointer(n));
	} else {
		void *last = hash_list_tail(cache->used);
		long oldnum;
		gboolean had_ioerr = booleanize(db->flags & DBM_IOERR_W);

		hash_list_moveto_head(cache->used, last);
		n = pointer_to_int(last);

		/*
	 * This page is no longer cached as its cache index is being reused.
		 * Flush it to disk if dirty before discarding it.
		 */

		g_assert(n >= 0 && n < cache->pages);

		oldnum = cache->numpag[n];

		if (cache->dirty[n] && !writebuf(db, oldnum, n)) {
			hash_list_iter_t *iter;
			void *item;
			gboolean found = FALSE;

			/*
			 * Cannot flush dirty page now, probably because we ran out of
			 * disk space.  Look through the cache whether we can reuse a
			 * non-dirty page instead, which would let us keep the dirty
			 * page a little longer in the cache, in the hope it can then
			 * be properly flushed later.
			 */

			iter = hash_list_iterator_tail(cache->used);

			while (NULL != (item = hash_list_iter_previous(iter))) {
				long i = pointer_to_int(item);

				g_assert(i >= 0 && i < cache->pages);

				if (!cache->dirty[i]) {
					found = TRUE;	/* OK, reuse cache slot #i then */
					n = i;
					oldnum = cache->numpag[i];
					break;
				}
			}

			hash_list_iter_release(&iter);

			if (found) {
				g_assert(item != NULL);
				hash_list_moveto_head(cache->used, item);

				/*
				 * Clear error condition if we had none prior to the flush
				 * attempt, since we can do without it for now.
				 */

				if (!had_ioerr)
					db->flags &= ~DBM_IOERR_W;

				g_warning("sdbm: \"%s\": "
					"reusing cache slot used by clean page #%ld instead",
					sdbm_name(db), oldnum);
			} else {
				g_warning("sdbm: \"%s\": cannot discard dirty page #%ld",
					sdbm_name(db), oldnum);
				return -1;
			}
		}

		g_hash_table_remove(cache->pagnum, ulong_to_pointer(oldnum));
		cache->dirty[n] = FALSE;
	}

	/*
	 * Record the association between the cache index and the page number.
	 */

	g_assert(n >= 0 && n < cache->pages);

	cache->numpag[n] = num;
	g_hash_table_insert(cache->pagnum,
		ulong_to_pointer(num), int_to_pointer(n));

	return n;
}