Example #1
//Add a <key,value> pair to the cache
//If key already exists, overwrite the old value
//If maxmem capacity is exceeded, values will be removed
void cache_set(cache_t cache, _key_t key, val_t val, uint32_t val_size)
{
  cache_real_obj *c = cache->cache;

  //Delete the value if it's already in the cache.
  meta_t old = get_key_loc(cache,key);
  if (old != NULL) cache_delete(cache, key);

  uint64_t available_memory = cache->cache->size - cache_space_used(cache);
  printf("Trying to add a value of size %"PRIu32", with available memory %"PRIu64"\n",val_size,available_memory);
  if (available_memory < val_size)
    {
      printf("   Increasing size.\n");
      defrag(cache, 1); //This doubles the cache size (defragmenting at the same time).
    }
  bucket_timer_up(cache);
  //Create a new meta object and pair it to a slab address, and copy the value over
  meta_t next_meta = create_meta(cache,key,val_size);
  next_meta->address = get_address(c->slab_manager,val_size);
  //enact eviction policy if we need space
  if (next_meta->address == NULL){
    uint32_t val_slab_class = get_slab_class(c->slab_manager, val_size);
    cache_evict(cache, val_slab_class);
    next_meta->address = get_address(c->slab_manager,val_size);
    if (next_meta->address == NULL){
      uint32_t slab_class = get_slab_class(c->slab_manager, val_size);
      printf("Couldn't add a %u-%u byte value because there are no slabs of that range, and no free slabs to be allocated\n",slab_class>>1, slab_class);
      free(next_meta);
      return;
    }
  }
  //The original example continues here by copying the value into next_meta->address
  //and linking the new meta entry into its bucket (remainder truncated in this listing).
}
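
A minimal caller sketch (not from the original project) that exercises the contract described in the comments above: insert, overwrite, then delete. It reuses only helpers that appear in Example #10 of this listing (create_cache_fake, create_data, test_destroy_cache, destroy_data); everything else is assumed.

//Sketch: set the same key twice (the second call overwrites), then delete it.
void cache_set_overwrite_sketch(void)
{
  cache_t cache = create_cache_fake(16 * DATASIZE);      //helpers borrowed from Example #10
  data_t d = create_data(1, (DATATYPE)"111", DATASIZE);

  cache_set(cache, d->key, d->value, d->value_size);     //first insert
  cache_set(cache, d->key, d->value, d->value_size);     //same key: the old value is overwritten, not duplicated
  cache_delete(cache, d->key);                           //remove the entry again

  test_destroy_cache(cache);
  destroy_data(d);
}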
Example #2
void
free_udprelay()
{
    struct ev_loop *loop = EV_DEFAULT;
    while (server_num-- > 0) {
        server_ctx_t *server_ctx = server_ctx_list[server_num];

#ifdef MODULE_LOCAL
        //SSR beg
        if (server_ctx->protocol_plugin) {
            server_ctx->protocol_plugin->dispose(server_ctx->protocol);
            server_ctx->protocol = NULL;
            free_obfs_class(server_ctx->protocol_plugin);
            server_ctx->protocol_plugin = NULL;
        }
        //SSR end
#endif

        ev_io_stop(loop, &server_ctx->io);
        close(server_ctx->fd);
        cache_delete(server_ctx->conn_cache, 0);
        ss_free(server_ctx);
        server_ctx_list[server_num] = NULL;
    }
}
Example #3
void cache_purge(void)
{
    if (cachedeleteing == 0)
    {
        register struct CACHE *cacheobj = lrulist.lastlru;
        cachepurges++;
        while (cachefreecount > CACHEGOAL && cacheobj != &lrulist)
        {
            register struct CACHE *lastobj = cacheobj->lastlru;
#ifdef DEBUG
            if (cacheobj->lastlru->nextlru != cacheobj ||
                    cacheobj->nextlru->lastlru != cacheobj ||
                    *(cacheobj->parent) != cacheobj)
            {
                printk("CACHE pointers in bad shape!\n");
            }
#endif
            if (cacheobj->refcount == 0)
            {
                if (cache_delete(cacheobj) != lastobj) cacheobj = lastobj;
            }
            else
            {
                cacheobj = lastobj;
            }
        }
    }
}
Example #4
//slab_class is the slab class that we need to evict in order to make room for the incoming value
void cache_evict(cache_t cache, uint32_t slab_class){
  cache_real_obj *c = cache->cache;
  uint8_t curr_max = 0;
  node* max_node = NULL;
  int i;
  //Scan every bucket for the entry with the highest (oldest) timer whose slab class is large enough
  for (i = 0; i < c->num_buckets; i++){
    node* current_node = c->buckets[i]->head;
    while (current_node != NULL){
      if ((current_node->meta->timer >= curr_max) &&
          (slab_class <= get_slab_class(c->slab_manager, current_node->meta->size))){
        curr_max = current_node->meta->timer;
        max_node = current_node;
      }
      current_node = current_node->next; //advance to the next node ('next' field assumed); the original snippet never advanced and looped forever
    }
  }
  if (max_node == NULL){
    printf("Couldn't find a value to evict.\n");
    return;
  }
  printf("Evicting %s\n", max_node->meta->key);
  cache_delete(cache, max_node->meta->key);
  return;
}
Example #5
NSAPI_PUBLIC int
dns_cache_delete(void *entry)
{
	if ( !dns_cache || !entry ) 
		return -1;

	return cache_delete(dns_cache, (cache_entry_t *)entry, 0);
}
Example #6
NSAPI_PUBLIC void 
cache_destroy(void *cache_ptr)
{
	cache_t *cache = (cache_t *)cache_ptr;
	cache_t *search, *last;
	cache_entry_t *ptr;
SOLARIS_PROBE(cache_destroy_start, "cache");

#ifdef IRIX
        NS_ASSERT(!cache->fast_mode);
#endif

	NS_ASSERT(cache_crit);
	NS_ASSERT(cache_ptr);
#ifdef CACHE_DEBUG
	NS_ASSERT(cache->magic == CACHE_MAGIC);
#endif

	crit_enter(cache_crit);
	crit_enter(cache->lock);

	ptr = cache->lru_head;
	while(ptr) {
		/* Caller MUST bump the access_count before calling delete
		 * We can do this since we hold the cache lock.  
		 */
		cache_use_increment(cache, ptr);
		cache_delete(cache, ptr, 0);
		ptr = cache->lru_head;
	}

	PERM_FREE(cache->table);

	cache->max_size = 0;
	cache->hash_size = 0;

	for ( last = NULL, search = cache_list; search; last = search,
		search = search->next)
		if (search == cache)
			break;

	if (search) {
		if (last) 
			last->next = search->next;
		else
			cache_list = search->next;
	}
	else {
		ereport(LOG_WARN, XP_GetAdminStr(DBT_cacheDestroyCacheTablesAppearCor_));
	}
	crit_exit(cache_crit);
	crit_exit(cache->lock);

	crit_terminate(cache->lock);

	PERM_FREE(cache);
SOLARIS_PROBE(cache_destroy_end, "cache");
}
Example #7
void cleanup_mem(void)
{
	int i;

	destroy_url_list();
	history_destroy();
	command_cleanup();
	queue_destroy(buddy_request_queue);
	cleanup_manufacture();
	cleanup_text_buffers();
	cleanup_fonts();
	destroy_all_actors();
	end_actors_lists();
	cleanup_lights();
	/* 2d objects */
	destroy_all_2d_objects();
	destroy_all_2d_object_defs();
	/* 3d objects */
	destroy_all_3d_objects();
	/* caches */
	cache_e3d->free_item = &destroy_e3d;
	cache_delete(cache_e3d);
	cache_e3d = NULL;
#ifdef NEW_TEXTURES
	free_texture_cache();
#endif
	// This should be fixed now  Sir_Odie
	cache_delete(cache_system);
	cache_system = NULL;
	/* map location information */
	for (i = 0; continent_maps[i].name; i++)
	{
	    free(continent_maps[i].name);
	}
	free (continent_maps);

	destroy_hash_table(server_marks);
	
	for (i = 0; i < video_modes_count; i++)
	{
		if (video_modes[i].name)
			free(video_modes[i].name);
	}
	free_shaders();
}
Example #8
 /**
  * @brief	Acquire a named lock, with synchronization provided via memcached.
  * @see	cache_silent_add()
  * @note	The lock will be held for a maximum of 10 minutes, and failed locking attempts will be retried
  * 		periodically for a maximum of 1 minute before returning failure.
  * @param	key		a managed string containing the name of the lock to be acquired.
  * @return	-1 on general failure, 0 on memcached failure, or 1 on success.
  */
int_t lock_get(stringer_t *key) {

	uint64_t value;
	stringer_t *lock = MANAGEDBUF(128);
	int_t success, iterations = MAGMA_LOCK_TIMEOUT;
//	const struct timespec delay = { .tv_sec = 0, .tv_nsec = 1000000000 };
	const struct timespec delay = { .tv_sec = 1, .tv_nsec = 0 };

	// Build the key.
	if (st_empty(key) || st_sprint(lock, "%.*s.lock", st_length_int(key), st_char_get(key)) <= 0) {
		log_pedantic("Unable generate the accessor for the cluster lock.");
		return -1;
	}

	// Build the lock value.
	value = time(NULL);

	do {

		// Keep the lock for ten minutes.
		if ((success = cache_silent_add(lock, PLACER(&value, sizeof(uint64_t)), MAGMA_LOCK_EXPIRATION)) != 1) {
			nanosleep(&delay, NULL);
		}

	} while (success != 1 && iterations--);

#ifdef MAGMA_PEDANTIC
	if (success != 1) log_pedantic("Unable to obtain a cluster lock for %.*s.", st_length_int(lock), st_char_get(lock));
#endif

	return success;
}

/**
  * @brief	Release a named lock, with synchronization provided via memcached.
  * @see	cache_delete()
  * @note	The lock will be held for 10 seconds, and locking attempts will occur periodically for 60 seconds prior to failure.
  * @param	key		a managed string containing the name of the lock to be released.
  * @return	-1 on general failure, 0 on memcached failure, or 1 on success.
  */
void lock_release(stringer_t *key) {

	stringer_t *lock = MANAGEDBUF(128);

	// Build the key.
	if (st_empty(key) || st_sprint(lock, "%.*s.lock", st_length_int(key), st_char_get(key)) <= 0) {
		log_pedantic("Unable generate the accessor for the cluster lock.");
		return;
	}

	/// LOW: At some point we should add logic to check whether this cluster node even owns the lock before
	/// 	blindly deleting the lock.
	cache_delete(lock);
	return;
}
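
A usage sketch (not from magma) for the lock_get()/lock_release() pair above: serialize a piece of cluster-wide work behind the memcached lock. Only lock_get(), lock_release() and the stringer_t/int_t types from the example are relied on; with_cluster_lock() and the do_cluster_work callback are hypothetical.

/**
 * @brief	Sketch: run a callback while holding the named cluster lock.
 * @note	lock_get() already retries for about a minute, so no extra retry loop is needed here.
 */
int_t with_cluster_lock(stringer_t *key, void (*do_cluster_work)(void)) {

	int_t state;

	// 1 means the lock was acquired; -1/0 are the failure codes documented above.
	if ((state = lock_get(key)) != 1) {
		return state;
	}

	do_cluster_work();

	// Best-effort release; the MAGMA_LOCK_EXPIRATION timeout acts as a backstop if this node dies first.
	lock_release(key);
	return 1;
}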
Example #9
void free_udprelay()
{
    struct ev_loop *loop = EV_DEFAULT;
    while (server_num-- > 0) {
        server_ctx_t *server_ctx = server_ctx_list[server_num];
        ev_io_stop(loop, &server_ctx->io);
        close(server_ctx->fd);
        cache_delete(server_ctx->conn_cache, 0);
        ss_free(server_ctx);
        server_ctx_list[server_num] = NULL;
    }
}
Example #10
//Test a query for a key that was inserted and then deleted
void test_key_deleted(){
    cache_t cache = create_cache_fake(16*DATASIZE);

    data_t data1 = create_data(1,(DATATYPE)"111",DATASIZE); 
    cache_set(cache,data1->key,data1->value,data1->value_size);
    data_existence_test(cache,data1);

    //delete the item 
    cache_delete(cache,data1->key);
    data_nonexistence_test(cache, data1);

    test_destroy_cache(cache);
    destroy_data(data1);
}
Example #11
bool should_add_evict_deletions(cache_t cache,uint32_t val_size){
    struct id_arr add_res = ids_to_delete_if_added(cache->evic_policy,val_size);
    if(add_res.should_add){
        for(size_t di = 0;di < add_res.size; di++){
            // the data handed back by the policy is a pointer to the key the cache is storing,
            // so use it to look up the entry and delete it
            cache_delete(cache,(key_type)(add_res.data[di]));
        }
    }
    //the array of ids needs to be freed
    if(add_res.data != NULL){
        free(add_res.data);
    }
    return add_res.should_add;
}
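
A sketch (not from the original project) of how an insertion path might call should_add_evict_deletions() before storing a value: the policy both answers the question and, as a side effect, deletes the victims it named. store_entry() and the key/value parameters are placeholders for whatever insertion routine this cache actually uses.

//Sketch: consult the eviction policy, then insert only if it agreed.
bool cache_set_with_policy(cache_t cache, key_type key, const void *val, uint32_t val_size){
    if(!should_add_evict_deletions(cache, val_size)){
        return false;                           //policy refused: the value can never fit
    }
    store_entry(cache, key, val, val_size);     //hypothetical insertion step
    return true;
}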
Example #12
void cache_remove(struct CACHE *cacheobj)
{
    if (cacheobj != NULL)
    {
        if (cacheobj->left != NULL) cache_remove(cacheobj->left);
        if (cacheobj->right != NULL) cache_remove(cacheobj->right);
        if (cacheobj->refcount == 0)
        {
            struct CACHE *delobj;
            do
            {
                delobj = cache_delete(cacheobj);
            }
            while (delobj != NULL && delobj != cacheobj);
        }
    }
}
Example #13
int cache_delete_first(void)
{
	cache_lock();

	if (ccacher.caches_size != 0 && ccacher.caches != NULL) {
		int ret;

		ret = cache_delete(0);
		xrKernelSetEventFlag(cache_del_event, CACHE_EVENT_DELETED);
		cache_unlock();

		return ret;
	}

	cache_unlock();
	return -1;
}
Example #14
void free_udprelay()
{
    struct ev_loop *loop = EV_DEFAULT;
#ifdef UDPRELAY_REMOTE
    if (resolve_ctx != NULL) {
        ev_io_stop(loop, &resolve_ctx->io);
        asyncns_free(resolve_ctx->asyncns);

        close(resolve_ctx->asyncnsfd);
        free(resolve_ctx);
        resolve_ctx = NULL;
    }
#endif
    if (server_ctx != NULL) {
        ev_io_stop(loop, &server_ctx->io);
        close(server_ctx->fd);
        cache_delete(server_ctx->conn_cache, 0);
        free(server_ctx);
        server_ctx = NULL;
    }
}
Example #15
NSAPI_PUBLIC int
cache_use_decrement(cache_t *cache, cache_entry_t *entry)
{
SOLARIS_PROBE(cache_use_decrement_start, "cache");
	NS_ASSERT(cache_crit);
	NS_ASSERT(cache);
	NS_ASSERT(entry);
	NS_ASSERT(entry->access_count > 0);
#ifdef CACHE_DEBUG
	NS_ASSERT(cache->magic == CACHE_MAGIC);
	NS_ASSERT(entry->magic == CACHE_ENTRY_MAGIC);
#endif

	crit_enter(cache->lock);
	/* If we are the last user of this entry and the delete
	 * is pending, cleanup now!
	 */
    int res = 0;
	if ((entry->access_count == 1) && (entry->delete_pending)) {
		if (cache_delete(cache, entry, 0) < 0) {
			/* can't delete right now; should never happen
			 * since we have the cache lock and we know the
			 * access count is 1
			 */
			entry->access_count--;
			(void)_cache_make_mru(cache, entry);
		}
        res = -1;
	} else {
		if (entry->access_count == 1)
			(void)_cache_make_mru(cache, entry);
		entry->access_count--;
	}
	crit_exit(cache->lock);

SOLARIS_PROBE(cache_use_decrement_end, "cache");
	return res;
}
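
The comments in the NSAPI examples above repeat one rule: bump access_count with cache_use_increment() before calling cache_delete(), and use cache_use_decrement() to drop the reference if the delete cannot happen yet. Below is a minimal sketch of that protocol, using only calls that appear in this listing; the surrounding examples additionally hold cache->lock via crit_enter()/crit_exit(), which is left to the caller here.

/* Sketch (not part of the NSAPI sources): the increment-before-delete pattern
 * followed by cache_destroy() and cache_insert_p() above.
 */
static int drop_entry(cache_t *cache, cache_entry_t *entry)
{
	/* Take a reference first, as the comments above require. */
	cache_use_increment(cache, entry);

	if (cache_delete(cache, entry, 0) < 0) {
		/* Could not delete right now; release our reference so the
		 * pending delete can complete via cache_use_decrement(). */
		cache_use_decrement(cache, entry);
		return -1;
	}

	return 0;
}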
Example #16
NSAPI_PUBLIC int
cache_insert_p(cache_t *cache, cache_entry_t *entry, void *key, void *data,
	cache_entry_functions_t *fn)
{
	cache_entry_t *tmp;
	int bucket;
	cache_entry_t *delete_ptr;
        int rcs = NSCACHESTATUS_OK;

SOLARIS_PROBE(cache_insert_p_start, "cache");
	NS_ASSERT(cache_crit);
	NS_ASSERT(cache);
	NS_ASSERT(entry);
	NS_ASSERT((cache->mru_head && cache->lru_head) || 
		(!cache->mru_head && !cache->lru_head));
#ifdef CACHE_DEBUG
	NS_ASSERT(cache->magic == CACHE_MAGIC);
	NS_ASSERT(entry->magic == CACHE_ENTRY_MAGIC);
#endif

	crit_enter(cache->lock);
	/* Reserve a space in the cache if possible; if not; try to delete
	 * the oldest guy...  Note that since we increment to reserve
	 * space; it is possible that we have maxed the cache when there
	 * are no entries yet added; so we need to check the lru_head ptr
	 * to see if the list is empty.  If the list is empty, there is 
	 * nothing we can delete, so just fail the insert request.  This
	 * condition should go away momentarily.
	 */
	if (cache->cache_size >= cache->max_size) {

		if (!cache->lru_head) {
			/* No space in the cache */
            cache->insert_fail++;
			crit_exit(cache->lock);
SOLARIS_PROBE(cache_insert_p_end, "cache");
			return -1;
		}

		/* Caller MUST bump the access_count before calling delete
		 * We can do this since we hold the cache lock.
		 */
		delete_ptr = cache->lru_head;
		cache_use_increment(cache, delete_ptr);
		if ( cache_delete(cache, delete_ptr, 0) < 0 ) {
			cache_use_decrement(cache, delete_ptr);
			NS_ASSERT(delete_ptr->access_count > 0);
            cache->insert_fail++;
			crit_exit(cache->lock);
SOLARIS_PROBE(cache_insert_p_end, "cache");
			return -1;
		}
	}
	cache->cache_size++;
	crit_exit(cache->lock);

	entry->key = key;
	entry->data = data;
	entry->access_count = 1;
	entry->delete_pending = 0;
	entry->fn_list = fn;
	entry->next = NULL;
	entry->lru = NULL;
	entry->mru = NULL;
	bucket = cache->virtual_fn->hash_fn(cache->hash_size, key);

#ifdef IRIX
	entry->next_deleted = NULL;
#endif

	crit_enter(cache->lock);
	/* Don't add duplicate entries in the cache */
	if ( (tmp = _cache_entry_lookup(cache, key, &rcs)) ) {
                /* Try to delete original element */
                if ( cache_delete(cache, tmp, 0) < 0) {
			cache->cache_size--;
			cache_use_decrement(cache, tmp);
			NS_ASSERT(tmp->access_count > 0);
            cache->insert_fail++;
			crit_exit(cache->lock);
SOLARIS_PROBE(cache_insert_p_end, "cache");
			return -1;
		}
	}
        else if (rcs == NSCACHESTATUS_DELETEPENDING) {
            cache->cache_size--;
            cache->insert_fail++;
            crit_exit(cache->lock);
            return -1;
        }

	/* Insert in hash table */
	entry->next = cache->table[bucket];
	cache->table[bucket] = entry;

    cache->insert_ok++;
	crit_exit(cache->lock);

SOLARIS_PROBE(cache_insert_p_end, "cache");
	return 0;
}
Example #17
int lookup_mount(const char *root, const char *name, int name_len, void *context)
{
	struct lookup_context *ctxt = (struct lookup_context *) context;
	struct stat st;
	char key[KEY_MAX_LEN + 1];
	int key_len;
	char mapent[MAPENT_MAX_LEN + 1];
	struct mapent_cache *me;
	time_t now = time(NULL);
	time_t t_last_read;
	int need_hup = 0;
	int ret = 1;

	if (stat(ctxt->mapname, &st)) {
		crit(MODPREFIX "file map %s, could not stat", ctxt->mapname);
		return 1;
	}

	if (ap.type == LKP_DIRECT)
		key_len = snprintf(key, KEY_MAX_LEN, "%s/%s", root, name);
	else
		key_len = snprintf(key, KEY_MAX_LEN, "%s", name);

	if (key_len > KEY_MAX_LEN)
		return 1;

	me = cache_lookup_first();
	t_last_read = me ? now - me->age : ap.exp_runfreq + 1;

	/* only if it has been modified */
	if (st.st_mtime > ctxt->mtime) {
		ret = lookup_one(root, key, key_len, ctxt);
		if (!ret)
			return 1;

		debug("ret = %d", ret);

		if (t_last_read > ap.exp_runfreq)
			if (ret & (CHE_UPDATED | CHE_MISSING))
				need_hup = 1;

		if (ret == CHE_MISSING) {
			int wild = CHE_MISSING;

			/* Maybe update wild card map entry */
			if (ap.type == LKP_INDIRECT) {
				wild = lookup_wild(root, ctxt);
				if (wild == CHE_MISSING)
					cache_delete(root, "*", 0);
			}

			if (cache_delete(root, key, 0) &&
					wild & (CHE_MISSING | CHE_FAIL))
				rmdir_path(key);
		}
	}

	me = cache_lookup(key);
	if (me == NULL) {
		/* path component, do submount */
		me = cache_partial_match(key);
		if (me)
			sprintf(mapent, "-fstype=autofs file:%s", ctxt->mapname);
	} else
		sprintf(mapent, me->mapent);

	if (me) {
		debug(MODPREFIX "%s -> %s", key, mapent);
		ret = ctxt->parse->parse_mount(root, name, name_len,
						  mapent, ctxt->parse->context);
	}

	/* Have parent update its map ? */
	if (need_hup)
		kill(getppid(), SIGHUP);

	return ret;
}
Example #18
static int lookup_one(struct autofs_point *ap,
		char *qKey, int qKey_len, struct lookup_context *ctxt)
{
	struct map_source *source;
	struct mapent_cache *mc;
	struct mapent *we;
	void *sss_ctxt = NULL;
	time_t age = time(NULL);
	char buf[MAX_ERR_BUF];
	char *value = NULL;
	char *s_key;
	int ret;

	source = ap->entry->current;
	ap->entry->current = NULL;
	master_source_current_signal(ap->entry);

	mc = source->mc;

	if (!setautomntent(ap->logopt, ctxt, ctxt->mapname, &sss_ctxt))
		return NSS_STATUS_UNAVAIL;

	ret = ctxt->getautomntbyname_r(qKey, &value, sss_ctxt);
	if (ret && ret != ENOENT) {
		char *estr = strerror_r(ret, buf, MAX_ERR_BUF);
		error(ap->logopt,
		      MODPREFIX "getautomntbyname_r: %s", estr);
		endautomntent(ap->logopt, ctxt, &sss_ctxt);
		if (value)
			free(value);
		return NSS_STATUS_UNAVAIL;
	}
	if (ret != ENOENT) {
		/*
		 * TODO: implement sun % hack for key translation for
		 * mixed case keys in schema that are single case only.
		 */
		s_key = sanitize_path(qKey, qKey_len, ap->type, ap->logopt);
		if (!s_key) {
			free(value);
			value = NULL;
			goto wild;
		}
		cache_writelock(mc);
		ret = cache_update(mc, source, s_key, value, age);
		cache_unlock(mc);
		endautomntent(ap->logopt, ctxt, &sss_ctxt);
		free(s_key);
		free(value);
		return NSS_STATUS_SUCCESS;
	}

wild:
	ret = ctxt->getautomntbyname_r("/", &value, sss_ctxt);
	if (ret && ret != ENOENT) {
		char *estr = strerror_r(ret, buf, MAX_ERR_BUF);
		error(ap->logopt,
		      MODPREFIX "getautomntbyname_r: %s", estr);
		endautomntent(ap->logopt, ctxt, &sss_ctxt);
		if (value)
			free(value);
		return NSS_STATUS_UNAVAIL;
	}
	if (ret == ENOENT) {
		ret = ctxt->getautomntbyname_r("*", &value, sss_ctxt);
		if (ret && ret != ENOENT) {
			char *estr = strerror_r(ret, buf, MAX_ERR_BUF);
			error(ap->logopt,
			      MODPREFIX "getautomntbyname_r: %s", estr);
			endautomntent(ap->logopt, ctxt, &sss_ctxt);
			if (value)
				free(value);
			return NSS_STATUS_UNAVAIL;
		}
	}

	if (ret == ENOENT) {
		/* Failed to find wild entry, update cache if needed */
		cache_writelock(mc);
		we = cache_lookup_distinct(mc, "*");
		if (we) {
			/* Wildcard entry existed and is now gone */
			if (we->source == source) {
				cache_delete(mc, "*");
				source->stale = 1;
			}
		}

		/* Not found in the map but found in the cache */
		struct mapent *exists = cache_lookup_distinct(mc, qKey);
		if (exists && exists->source == source) {
			if (exists->mapent) {
				free(exists->mapent);
				exists->mapent = NULL;
				source->stale = 1;
				exists->status = 0;
			}
		}
		cache_unlock(mc);
		endautomntent(ap->logopt, ctxt, &sss_ctxt);
		return NSS_STATUS_NOTFOUND;
	}

	cache_writelock(mc);
	/* Wildcard not in map but now is */
	we = cache_lookup_distinct(mc, "*");
	if (!we)
		source->stale = 1;
	ret = cache_update(mc, source, "*", value, age);
	cache_unlock(mc);

	endautomntent(ap->logopt, ctxt, &sss_ctxt);
        free(value);

	return NSS_STATUS_SUCCESS;
}
Example #19
int lookup_mount(struct autofs_point *ap, const char *name, int name_len, void *context)
{
	struct lookup_context *ctxt = (struct lookup_context *) context;
	struct map_source *source;
	struct mapent_cache *mc;
	struct mapent *me;
	char key[KEY_MAX_LEN + 1];
	int key_len;
	char *mapent = NULL;
	char mapent_buf[MAPENT_MAX_LEN + 1];
	int ret;

	source = ap->entry->current;
	ap->entry->current = NULL;
	master_source_current_signal(ap->entry);

	mc = source->mc;

	debug(ap->logopt, MODPREFIX "looking up %s", name);

	key_len = snprintf(key, KEY_MAX_LEN + 1, "%s", name);
	if (key_len > KEY_MAX_LEN)
		return NSS_STATUS_NOTFOUND;

	/* Check if we recorded a mount fail for this key anywhere */
	me = lookup_source_mapent(ap, key, LKP_DISTINCT);
	if (me) {
		if (me->status >= time(NULL)) {
			cache_unlock(me->mc);
			return NSS_STATUS_NOTFOUND;
		} else {
			struct mapent_cache *smc = me->mc;
			struct mapent *sme;

			if (me->mapent)
				cache_unlock(smc);
			else {
				cache_unlock(smc);
				cache_writelock(smc);
				sme = cache_lookup_distinct(smc, key);
				/* Negative timeout expired for non-existent entry. */
				if (sme && !sme->mapent) {
					if (cache_pop_mapent(sme) == CHE_FAIL)
						cache_delete(smc, key);
				}
				cache_unlock(smc);
			}
		}
	}

        /*
	 * We can't check the direct mount map as if it's not in
	 * the map cache already we never get a mount lookup, so
	 * we never know about it.
	 */
	if (ap->type == LKP_INDIRECT && *key != '/') {
		int status;
		char *lkp_key;

		cache_readlock(mc);
		me = cache_lookup_distinct(mc, key);
		if (me && me->multi)
			lkp_key = strdup(me->multi->key);
		else
			lkp_key = strdup(key);
		cache_unlock(mc);

		if (!lkp_key)
			return NSS_STATUS_UNKNOWN;

		master_source_current_wait(ap->entry);
		ap->entry->current = source;

		status = check_map_indirect(ap, lkp_key, strlen(lkp_key), ctxt);
		free(lkp_key);
		if (status)
			return status;
	}

	/*
	 * We can't take the writelock for direct mounts. If we're
	 * starting up or trying to re-connect to an existing direct
	 * mount we could be iterating through the map entries with
	 * the readlock held. But we don't need to update the cache
	 * when we're starting up so just take the readlock in that
	 * case.
	 */
	if (ap->flags & MOUNT_FLAG_REMOUNT)
		cache_writelock(mc);
	else
		cache_readlock(mc);
	me = cache_lookup(mc, key);
	/* Stale mapent => check for entry in alternate source or wildcard */
	if (me && !me->mapent) {
		while ((me = cache_lookup_key_next(me)))
			if (me->source == source)
				break;
		if (!me)
			me = cache_lookup_distinct(mc, "*");
	}
	if (me && me->mapent) {
		/*
		 * If this is a lookup add wildcard match for later validation
		 * checks and negative cache lookups.
		 */
		if (ap->type == LKP_INDIRECT && *me->key == '*' &&
		   !(ap->flags & MOUNT_FLAG_REMOUNT)) {
			ret = cache_update(mc, source, key, me->mapent, me->age);
			if (!(ret & (CHE_OK | CHE_UPDATED)))
				me = NULL;
		}
		if (me && (me->source == source || *me->key == '/')) {
			strcpy(mapent_buf, me->mapent);
			mapent = mapent_buf;
		}
	}
	cache_unlock(mc);

	if (!mapent)
		return NSS_STATUS_TRYAGAIN;

	master_source_current_wait(ap->entry);
	ap->entry->current = source;

	debug(ap->logopt, MODPREFIX "%s -> %s", key, mapent);
	ret = ctxt->parse->parse_mount(ap, key, key_len,
				       mapent, ctxt->parse->context);
	if (ret) {
		/* Don't update negative cache when re-connecting */
		if (ap->flags & MOUNT_FLAG_REMOUNT)
			return NSS_STATUS_TRYAGAIN;
		cache_writelock(mc);
		cache_update_negative(mc, source, key, ap->negative_timeout);
		cache_unlock(mc);
		return NSS_STATUS_TRYAGAIN;
	}

	return NSS_STATUS_SUCCESS;
}
Example #20
//Evict the last one to make space
void cache_evict(cache_t cache){
    //find the key to evict
    key_type key_to_evict = cache->linked_list->evict_f(cache);
    cache_delete(cache, key_to_evict);

}