Example #1
static apr_status_t decrement_refcount(void *arg)
{
    cache_object_t *obj = (cache_object_t *) arg;

    /* If obj->complete is not set, the cache update failed and the
     * object needs to be removed from the cache then cleaned up.
     * The garbage collector may have ejected the object from the
     * cache already, so make sure it is really still in the cache
     * before attempting to remove it.
     */
    if (!obj->complete) {
        cache_object_t *tobj = NULL;
        if (sconf->lock) {
            apr_thread_mutex_lock(sconf->lock);
        }
        tobj = cache_find(sconf->cache_cache, obj->key);
        if (tobj == obj) {
            cache_remove(sconf->cache_cache, obj);
            apr_atomic_dec32(&obj->refcount);
        }
        if (sconf->lock) {
            apr_thread_mutex_unlock(sconf->lock);
        }
    }

    /* If the refcount drops to 0, cleanup the cache object */
    if (!apr_atomic_dec32(&obj->refcount)) {
        cleanup_cache_object(obj);
    }
    return APR_SUCCESS;
}
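The pattern above recurs throughout these examples: every holder of a cache object takes a reference with apr_atomic_inc32() and drops it with apr_atomic_dec32(), and only the caller that sees the decrement reach zero runs the cleanup. A minimal, self-contained sketch of that idiom, using a hypothetical refobj type rather than mod_mem_cache's cache_object_t:

#include <stdio.h>
#include <stdlib.h>
#include <apr_general.h>
#include <apr_atomic.h>

/* Hypothetical reference-counted object; not mod_mem_cache's cache_object_t. */
typedef struct refobj {
    apr_uint32_t refcount;
    const char *name;
} refobj;

static refobj *refobj_create(const char *name)
{
    refobj *o = malloc(sizeof(*o));
    apr_atomic_set32(&o->refcount, 1);   /* the creator holds the first reference */
    o->name = name;
    return o;
}

static void refobj_hold(refobj *o)
{
    apr_atomic_inc32(&o->refcount);
}

static void refobj_release(refobj *o)
{
    /* apr_atomic_dec32() returns zero only for the caller that drops the
     * last reference, so exactly one caller performs the cleanup. */
    if (!apr_atomic_dec32(&o->refcount)) {
        printf("freeing %s\n", o->name);
        free(o);
    }
}

int main(void)
{
    refobj *o;

    apr_initialize();                    /* also sets up the atomic layer */

    o = refobj_create("demo");
    refobj_hold(o);                      /* second holder */
    refobj_release(o);                   /* 2 -> 1, no cleanup */
    refobj_release(o);                   /* 1 -> 0, cleanup runs */

    apr_terminate();
    return 0;
}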
Example #2
void * APR_THREAD_FUNC thread_func_atomic(apr_thread_t *thd, void *data)
{
    int i;

    for (i = 0; i < NUM_ITERATIONS ; i++) {
        apr_atomic_inc32(&y);
        apr_atomic_add32(&y, 2);
        apr_atomic_dec32(&y);
        apr_atomic_dec32(&y);
    }
    apr_thread_exit(thd, exit_ret_val);
    return NULL;
}
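Each iteration above is a net +1 on the shared counter (+1, +2, -1, -1), so with N threads the final value should be N * NUM_ITERATIONS. The globals y, NUM_ITERATIONS and exit_ret_val are defined elsewhere in APR's atomic test; the sketch below is a hypothetical standalone harness (assumed thread and iteration counts, not the real test driver) that exercises the same function and checks the expected total:

#include <stdio.h>
#include <apr_general.h>
#include <apr_atomic.h>
#include <apr_thread_proc.h>

#define NUM_THREADS    4        /* assumed values, not APR's */
#define NUM_ITERATIONS 250000

static apr_uint32_t y = 0;
static apr_status_t exit_ret_val = APR_SUCCESS;

static void * APR_THREAD_FUNC thread_func_atomic(apr_thread_t *thd, void *data)
{
    int i;

    /* Each iteration is a net +1 on y: +1 +2 -1 -1. */
    for (i = 0; i < NUM_ITERATIONS; i++) {
        apr_atomic_inc32(&y);
        apr_atomic_add32(&y, 2);
        apr_atomic_dec32(&y);
        apr_atomic_dec32(&y);
    }
    apr_thread_exit(thd, exit_ret_val);
    return NULL;
}

int main(void)
{
    apr_pool_t *pool;
    apr_thread_t *threads[NUM_THREADS];
    apr_status_t rv;
    int i;

    apr_initialize();
    apr_pool_create(&pool, NULL);

    for (i = 0; i < NUM_THREADS; i++) {
        apr_thread_create(&threads[i], NULL, thread_func_atomic, NULL, pool);
    }
    for (i = 0; i < NUM_THREADS; i++) {
        apr_thread_join(&rv, threads[i]);
    }

    /* Expect NUM_THREADS * NUM_ITERATIONS if the operations really are atomic. */
    printf("y = %u (expected %u)\n",
           (unsigned)apr_atomic_read32(&y),
           (unsigned)(NUM_THREADS * NUM_ITERATIONS));

    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}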
Example #3
static void test_dec32(abts_case *tc, void *data)
{
    apr_uint32_t y32;
    int rv;

    apr_atomic_set32(&y32, 2);

    rv = apr_atomic_dec32(&y32);
    ABTS_INT_EQUAL(tc, 1, y32);
    ABTS_ASSERT(tc, "atomic_dec returned zero when it shouldn't", rv != 0);

    rv = apr_atomic_dec32(&y32);
    ABTS_INT_EQUAL(tc, 0, y32);
    ABTS_ASSERT(tc, "atomic_dec didn't returned zero when it should", rv == 0);
}
Example #4
File: fdqueue.c  Project: AzerTyQsdF/osx
void ap_pop_pool(apr_pool_t ** recycled_pool, fd_queue_info_t * queue_info)
{
    /* Atomically pop a pool from the recycled list */

    /* This function is safe only as long as it is single threaded because
     * it reaches into the queue and accesses "next" which can change.
     * We are OK today because it is only called from the listener thread.
     * cas-based pushes do not have the same limitation - any number can
     * happen concurrently with a single cas-based pop.
     */

    *recycled_pool = NULL;


    for (;;) {
        struct recycled_pool *first_pool = queue_info->recycled_pools;
        if (first_pool == NULL) {
            break;
        }
        if (apr_atomic_casptr
            ((void*) &(queue_info->recycled_pools),
             first_pool->next, first_pool) == first_pool) {
            *recycled_pool = first_pool->pool;
            if (queue_info->max_recycled_pools >= 0)
                apr_atomic_dec32(&queue_info->recycled_pools_count);
            break;
        }
    }
}
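As the comment says, the CAS-based pop above is single-consumer only, while CAS-based pushes tolerate any number of concurrent callers. The push/pop idiom can be shown in isolation with apr_atomic_casptr(); the sketch below uses a hypothetical node type and stack rather than httpd's recycled_pool list, and keeps the same single-popper caveat:

#include <stddef.h>
#include <apr_atomic.h>

/* Hypothetical lock-free stack node; not httpd's struct recycled_pool. */
struct node {
    struct node *next;
    void *payload;
};

static struct node *volatile stack_head = NULL;

/* Push: safe for any number of concurrent callers.  If the CAS loses a
 * race, the loop re-reads the head and retries. */
static void stack_push(struct node *n)
{
    for (;;) {
        struct node *old = stack_head;
        n->next = old;
        if (apr_atomic_casptr((void *)&stack_head, n, old) == old) {
            return;
        }
    }
}

/* Pop: like ap_pop_pool(), safe only for a single popping thread, because
 * it reads first->next, which a concurrent popper could already have
 * handed out and modified. */
static struct node *stack_pop(void)
{
    for (;;) {
        struct node *first = stack_head;
        if (first == NULL) {
            return NULL;
        }
        if (apr_atomic_casptr((void *)&stack_head, first->next, first) == first) {
            return first;
        }
    }
}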
Example #5
static apr_status_t cleanup_cache_mem(void *sconfv)
{
    cache_object_t *obj;
    mem_cache_conf *co = (mem_cache_conf*) sconfv;

    if (!co) {
        return APR_SUCCESS;
    }
    if (!co->cache_cache) {
        return APR_SUCCESS;
    }

    if (sconf->lock) {
        apr_thread_mutex_lock(sconf->lock);
    }
    obj = cache_pop(co->cache_cache);
    while (obj) {
        /* Iterate over the cache and clean up each unreferenced entry */
        if (!apr_atomic_dec32(&obj->refcount)) {
            cleanup_cache_object(obj);
        }
        obj = cache_pop(co->cache_cache);
    }

    /* Cache is empty, free the cache table */
    cache_free(co->cache_cache);

    if (sconf->lock) {
        apr_thread_mutex_unlock(sconf->lock);
    }
    return APR_SUCCESS;
}
Example #6
/* remove_entity()
 * Notes:
 *   refcount should be at least 1 upon entry to this function to account
 *   for this thread's reference to the object. If the refcount is 1, then
 *   the object has been removed from the cache by another thread and this thread
 *   is the last thread accessing the object.
 */
static int remove_entity(cache_handle_t *h)
{
    cache_object_t *obj = h->cache_obj;
    cache_object_t *tobj = NULL;

    if (sconf->lock) {
        apr_thread_mutex_lock(sconf->lock);
    }

    /* If the entity is still in the cache, remove it and decrement the
     * refcount. If the entity is not in the cache, do nothing. In both cases
     * decrement_refcount called by the last thread referencing the object will
     * trigger the cleanup.
     */
    tobj = cache_find(sconf->cache_cache, obj->key);
    if (tobj == obj) {
        cache_remove(sconf->cache_cache, obj);
        apr_atomic_dec32(&obj->refcount);
    }

    if (sconf->lock) {
        apr_thread_mutex_unlock(sconf->lock);
    }

    return OK;
}
Example #7
/* remove_url()
 * Notes:
 */
static int remove_url(cache_handle_t *h, apr_pool_t *p)
{
    cache_object_t *obj;
    int cleanup = 0;

    if (sconf->lock) {
        apr_thread_mutex_lock(sconf->lock);
    }

    obj = h->cache_obj;
    if (obj) {
        cache_remove(sconf->cache_cache, obj);
        /* For performance, cleanup cache object after releasing the lock */
        cleanup = !apr_atomic_dec32(&obj->refcount);
    }
    if (sconf->lock) {
        apr_thread_mutex_unlock(sconf->lock);
    }

    if (cleanup) {
        cleanup_cache_object(obj);
    }

    return OK;
}
Example #8
File: bench_rpc.c  Project: Abioy/mpaxos
rpc_state* add_cb(rpc_state *state) {
    // Do nothing
    
    //uint32_t j = apr_atomic_add32(&n_rpc_, 1);
    
    uint32_t *res = (uint32_t*) state->raw_input;
    uint32_t k = *res;
    
    n_callbacks_[k] += 1;
    LOG_DEBUG("client callback exceuted. rpc no: %d", n_rpc_);

    if (n_callbacks_[k] == max_rpc_) {       
	if (apr_atomic_dec32(&n_active_cli_) == 0) {
	    tm_end_ = apr_time_now();
	    apr_thread_mutex_lock(mx_rpc_);
	    apr_thread_cond_signal(cd_rpc_);
	    apr_thread_mutex_unlock(mx_rpc_);
	}
    }
    
    if (n_callbacks_[k] % 1000000 == 0) {
	apr_atomic_add32(&n_rpc_, 1000000);
	tm_middle_ = apr_time_now();
	uint64_t p = tm_middle_ - tm_begin_;
	double rate = n_rpc_ * 1.0 / p;
	LOG_INFO("rpc rate: %0.2f million per second", rate);
    }

    // do another rpc.    
    if (max_rpc_ < 0 || n_issues_[k] < max_rpc_) {
	n_issues_[k]++;
	call_add(clis_[k], k);
    }
    return NULL;
}
Example #9
static APR_INLINE int proc_pthread_mutex_dec(apr_proc_mutex_t *mutex)
{
    if (mutex->pthread_refcounting) {
        return apr_atomic_dec32(&proc_pthread_mutex_refcount(mutex));
    }
    return 0;
}
Example #10
File: apu_dso.c  Project: cmjonze/apr
apr_status_t apu_dso_init(apr_pool_t *pool)
{
    apr_status_t ret = APR_SUCCESS;
    apr_pool_t *parent;

    if (apr_atomic_inc32(&initialised)) {
        apr_atomic_set32(&initialised, 1); /* prevent wrap-around */

        while (apr_atomic_read32(&in_init)) /* wait until we get fully inited */
            ;

        return APR_SUCCESS;
    }

    /* Top level pool scope, need process-scope lifetime */
    for (parent = apr_pool_parent_get(pool);
         parent && parent != pool;
         parent = apr_pool_parent_get(pool))
        pool = parent;

    dsos = apr_hash_make(pool);

#if APR_HAS_THREADS
    ret = apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, pool);
    /* This already registers a pool cleanup */
#endif

    apr_pool_cleanup_register(pool, NULL, apu_dso_term,
                              apr_pool_cleanup_null);

    apr_atomic_dec32(&in_init);

    return ret;
}
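The inc32/set32/read32 sequence at the top of apu_dso_init(), together with the final apr_atomic_dec32(&in_init), is APR's lock-free initialize-exactly-once idiom: the first caller wins the increment and performs the setup, while later callers clamp the counter back to 1 and spin until in_init drops to zero. A stripped-down sketch of the idiom with hypothetical globals (assuming, as in apu_dso.c, that initialised starts at 0 and in_init starts at 1):

#include <apr_atomic.h>

/* Hypothetical module-scope state mirroring the apu_dso.c pattern: the
 * "busy" flag starts at 1 so latecomers spin until the first caller has
 * finished its one-time setup and decremented it to 0. */
static apr_uint32_t initialised = 0;
static apr_uint32_t in_init = 1;

static void do_one_time_setup(void)
{
    /* placeholder for the real work (creating hashes, mutexes, ...) */
}

static void init_once(void)
{
    if (apr_atomic_inc32(&initialised)) {
        /* Someone got here first: inc32 returned the old (non-zero) value. */
        apr_atomic_set32(&initialised, 1);      /* prevent wrap-around */
        while (apr_atomic_read32(&in_init))     /* wait for the winner */
            ;
        return;
    }

    do_one_time_setup();

    /* Publish completion; any spinning callers may now proceed. */
    apr_atomic_dec32(&in_init);
}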
Example #11
static void release(h2_mplx *m)
{
    if (!apr_atomic_dec32(&m->refs)) {
        if (m->join_wait) {
            apr_thread_cond_signal(m->join_wait);
        }
    }
}
Example #12
SWITCH_DECLARE(int) switch_atomic_dec(volatile switch_atomic_t *mem)
{
#ifdef apr_atomic_t
	return apr_atomic_dec((apr_atomic_t *)mem);
#else
	return apr_atomic_dec32((apr_uint32_t *)mem);
#endif
}
Example #13
File: child.c  Project: Ga-vin/apache
static winnt_conn_ctx_t *winnt_get_connection(winnt_conn_ctx_t *context)
{
    int rc;
    DWORD BytesRead;
    LPOVERLAPPED pol;
#ifdef _WIN64
    ULONG_PTR CompKey;
#else
    DWORD CompKey;
#endif

    mpm_recycle_completion_context(context);

    apr_atomic_inc32(&g_blocked_threads);
    while (1) {
        if (workers_may_exit) {
            apr_atomic_dec32(&g_blocked_threads);
            return NULL;
        }
        rc = GetQueuedCompletionStatus(ThreadDispatchIOCP, &BytesRead,
                                       &CompKey, &pol, INFINITE);
        if (!rc) {
            rc = apr_get_os_error();
            ap_log_error(APLOG_MARK, APLOG_DEBUG, rc, ap_server_conf, APLOGNO(00349)
                         "Child: GetQueuedComplationStatus returned %d",
                         rc);
            continue;
        }

        switch (CompKey) {
        case IOCP_CONNECTION_ACCEPTED:
            context = CONTAINING_RECORD(pol, winnt_conn_ctx_t, overlapped);
            break;
        case IOCP_SHUTDOWN:
            apr_atomic_dec32(&g_blocked_threads);
            return NULL;
        default:
            apr_atomic_dec32(&g_blocked_threads);
            return NULL;
        }
        break;
    }
    apr_atomic_dec32(&g_blocked_threads);

    return context;
}
Example #14
IDATA VMCALL hythread_set_thread_stop_callback(hythread_t thread,
    tm_thread_event_callback_proc stop_callback)
{
    IDATA status = hythread_set_safepoint_callback(thread, stop_callback);

    while (thread->suspend_count > 0) {
        apr_atomic_dec32((volatile apr_uint32_t *)
            &thread->suspend_count);
        apr_atomic_dec32((volatile apr_uint32_t *)
            &thread->request);
    }

    // If there is no competition, it would be 1, but if someone else is
    // suspending the same thread simultaneously, it could be greater than 1;
    // if the safepoint callback isn't set, it could be equal to 0.
    //
    // The following assertion may be false because at any given time
    // one of the conditions is true and the other is false, but
    // the whole condition may still evaluate to false.
    // assert(thread->request > 0 || thread->safepoint_callback == NULL);

    // notify the thread that it may wake up now,
    // so that it would eventually reach exception safepoint
    // and execute callback
    hysem_post(thread->resume_event);

    if (thread->state &
            (TM_THREAD_STATE_SLEEPING | TM_THREAD_STATE_WAITING_WITH_TIMEOUT
                | TM_THREAD_STATE_WAITING | TM_THREAD_STATE_IN_MONITOR_WAIT
                | TM_THREAD_STATE_WAITING_INDEFINITELY | TM_THREAD_STATE_PARKED))
    {
        // This is needed for correct stopping of a thread blocked on monitor_wait.
        // The thread needs some flag to exit its waiting loop.
        // We piggy-back on interrupted status. A correct exception from TLS
        // will be thrown because the check of exception status on leaving
        // JNI frame comes before checking return status in Object.wait().
        // Interrupted status will be cleared by function returning TM_ERROR_INTERRUPT.
        // (though, in case of parked thread, it will not be cleared)
        hythread_interrupt(thread);
    }
    return status;
} // hythread_set_thread_stop_callback
Example #15
/**
 * memcache_cache_free()
 * memcache_cache_free is a callback that is only invoked by a thread
 * running in cache_insert(). cache_insert() runs under protection
 * of sconf->lock.  By the time this function has been entered, the cache_object
 * has been ejected from the cache. Decrement the refcount and, if the refcount drops
 * to 0, clean up the cache object.
 */
static void memcache_cache_free(void*a)
{
    cache_object_t *obj = (cache_object_t *)a;

    /* Decrement the refcount to account for the object being ejected
     * from the cache. If the refcount is 0, free the object.
     */
    if (!apr_atomic_dec32(&obj->refcount)) {
        cleanup_cache_object(obj);
    }
}
Example #16
static void busyloop_dec32(tbox_t *tbox)
{
    apr_uint32_t val;

    do {
        busyloop_read32(tbox);
        val = apr_atomic_dec32(tbox->mem);
        apr_thread_mutex_lock(thread_lock);
        ABTS_INT_NEQUAL(tbox->tc, 0, val);
        apr_thread_mutex_unlock(thread_lock);
    } while (--tbox->loop);
}
Example #17
// Called when a child process is about to exec.
static apr_status_t femto_child_exit(void* data)
{
  apr_uint32_t nonzero = apr_atomic_dec32( &femto_num_running );
  //fprintf(stderr, "FEMTO stopping was %i\n", (int) nonzero);
  //fprintf(stdout, "FEMTO stopping was %i\n", (int) nonzero);
  if( ! nonzero ) {
    //fprintf(stdout, "FEMTO stopiing really\n");
    femto_stop_server(&femto_server);
  }

  return APR_SUCCESS;
}
Example #18
static void test_wrap_zero(abts_case *tc, void *data)
{
    apr_uint32_t y32;
    apr_uint32_t rv;
    apr_uint32_t minus1 = -1;
    char *str;

    apr_atomic_set32(&y32, 0);
    rv = apr_atomic_dec32(&y32);

    ABTS_ASSERT(tc, "apr_atomic_dec32 on zero returned zero.", rv != 0);
    str = apr_psprintf(p, "zero wrap failed: 0 - 1 = %d", y32);
    ABTS_ASSERT(tc, str, y32 == minus1);
}
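The test above pins down a subtle property: apr_atomic_dec32() on a counter that is already 0 wraps around to 0xFFFFFFFF, and since the new value is non-zero the return value is non-zero too, so "last one out" logic misbehaves if a refcount ever underflows. A tiny hypothetical driver (not part of the APR test suite) that shows the same thing outside the ABTS framework:

#include <stdio.h>
#include <apr_general.h>
#include <apr_atomic.h>

int main(void)
{
    apr_uint32_t y32;

    apr_initialize();

    apr_atomic_set32(&y32, 0);

    /* Decrementing 0 wraps: y32 becomes 0xFFFFFFFF, and because the new
     * value is non-zero the return value is non-zero as well, i.e. this
     * does NOT look like "the counter just reached zero". */
    printf("dec32 returned %s, y32 = 0x%08x\n",
           apr_atomic_dec32(&y32) ? "non-zero" : "zero",
           (unsigned)y32);

    apr_terminate();
    return 0;
}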
Example #19
static void lock_and_wait(toolbox_t *box)
{
    apr_status_t rv;
    abts_case *tc = box->tc;
    apr_uint32_t *count = box->data;

    rv = apr_thread_mutex_lock(box->mutex);
    ABTS_SUCCESS(rv);

    apr_atomic_inc32(count);

    rv = apr_thread_cond_wait(box->cond, box->mutex);
    ABTS_SUCCESS(rv);

    apr_atomic_dec32(count);

    rv = apr_thread_mutex_unlock(box->mutex);
    ABTS_SUCCESS(rv);
}
Example #20
static apr_status_t 
s_handle_response(serf_request_t *UNUSED(request), serf_bucket_t *response, void *handler_ctx, apr_pool_t *UNUSED(pool))
{
  const char      *data;
  apr_size_t      len;
  serf_status_line sl;
  apr_status_t     rv;
  handler_ctx_t  *ctx = handler_ctx;

  rv = serf_bucket_response_status(response, &sl);
  if (rv != APR_SUCCESS) {
    if (APR_STATUS_IS_EAGAIN(rv)) {
      return rv;
    }
    ctx->rv = rv;
    apr_atomic_dec32(&ctx->requests_outstanding); 
    return rv;
  }
  ctx->reason = sl.reason;
  ctx->response_code = sl.code;

  while (1) {
    rv = serf_bucket_read(response, 2048, &data, &len);
    if (SERF_BUCKET_READ_ERROR(rv)) {
      ctx->rv = rv;
      apr_atomic_dec32(&ctx->requests_outstanding);
      DBG(ctx->r, "REQ[%X] (ERROR)", TO_ADDR(ctx->r));
      DBG(ctx->r,"REQ[%X] end %s()",TO_ADDR(ctx->r),__func__);
      return rv;
    }
    if (APR_STATUS_IS_EAGAIN(rv)) {
      /* 0 byte return if EAGAIN returned. */
      DBG(ctx->r,"REQ[%X] (EAGAIN) len:[%d]", TO_ADDR(ctx->r), (int)len);
      DBG(ctx->r,"REQ[%X] end %s()",TO_ADDR(ctx->r),__func__);
      return rv;
    }

    if (len > 0) {
      if (! ctx->response) {
        /* +1 leaves room for the terminating NUL written below */
        ctx->response = apr_palloc(ctx->pool, len + 1);
        ctx->response[0] = 0;
        ctx->response_len = 0;
      }
      else {
        char *tmp = ctx->response;
        ctx->response = apr_palloc(ctx->pool, ctx->response_len + len + 1);
        memcpy(ctx->response, tmp, ctx->response_len);
      }
      memcpy(&ctx->response[ctx->response_len], data, len);
      ctx->response_len += len;
      ctx->response[ctx->response_len] = 0;
    }
    
    if (APR_STATUS_IS_EOF(rv)) {
      serf_bucket_t *hdrs;
      char *tmp_headers = "";
      hdrs = serf_bucket_response_get_headers(response);
      while (1) {
        rv = serf_bucket_read(hdrs, 2048, &data, &len);
        if (SERF_BUCKET_READ_ERROR(rv))
          return rv;
        tmp_headers = apr_pstrcat(ctx->pool, tmp_headers, apr_psprintf(ctx->pool , "%.*s", (unsigned int)len, data), NULL);
        if (APR_STATUS_IS_EOF(rv)) {
          break;
        }
      }
      ctx->headers_out = apr_table_make(ctx->pool, 0);

      char *pstat;
      char *pair = NULL;
      for (;;) {
        pair = apr_strtok(tmp_headers, "\n", &pstat);
        if (!pair) break;
        tmp_headers = NULL;
        char *key;
        char *val;

        char *tpair = apr_pstrdup(ctx->pool, pair);
        key = tpair;
        val = strchr(tpair, ':');
        if (val) {
          *val = 0;
          val++;
          key = qs_trim_string(ctx->pool, key);
          val = qs_trim_string(ctx->pool, val);
          DBG(ctx->r,"REQ[%X] key:[%s], val:[%s]", TO_ADDR(ctx->r),key, val);
          apr_table_add(ctx->headers_out, key, val);
        }
      }
      ctx->rv = APR_SUCCESS;
      apr_atomic_dec32(&ctx->requests_outstanding);
      DBG(ctx->r,"REQ[%X] (NORMAL)", TO_ADDR(ctx->r));
      DBG(ctx->r,"REQ[%X] end %s()",TO_ADDR(ctx->r),__func__);
      return APR_EOF;
    }

    if (APR_STATUS_IS_EAGAIN(rv)) {
      DBG(ctx->r,"REQ[%X] (EAGAIN)", TO_ADDR(ctx->r));
      DBG(ctx->r,"REQ[%X] end %s()",TO_ADDR(ctx->r),__func__);
      return rv;
    }
  }
}
Example #21
static apr_status_t dbm_open_type(apr_dbm_type_t const* * vtable,
                                  const char *type, 
                                  apr_pool_t *pool)
{
#if !APU_DSO_BUILD

    *vtable = NULL;
    if (!strcasecmp(type, "default"))     *vtable = &DBM_VTABLE;
#if APU_HAVE_DB
    else if (!strcasecmp(type, "db"))     *vtable = &apr_dbm_type_db;
#endif
    else if (*type && !strcasecmp(type + 1, "dbm")) {
#if APU_HAVE_GDBM
        if (*type == 'G' || *type == 'g') *vtable = &apr_dbm_type_gdbm;
#endif
#if APU_HAVE_NDBM
        if (*type == 'N' || *type == 'n') *vtable = &apr_dbm_type_ndbm;
#endif
#if APU_HAVE_SDBM
        if (*type == 'S' || *type == 's') *vtable = &apr_dbm_type_sdbm;
#endif
        /* avoid empty block */ ;
    }
    if (*vtable)
        return APR_SUCCESS;
    return APR_ENOTIMPL;

#else /* APU_DSO_BUILD */

    char modname[32];
    char symname[34];
    apr_dso_handle_sym_t symbol;
    apr_status_t rv;
    int usertype = 0;

    if (!strcasecmp(type, "default"))        type = DBM_NAME;
    else if (!strcasecmp(type, "db"))        type = "db";
    else if (*type && !strcasecmp(type + 1, "dbm")) {
        if      (*type == 'G' || *type == 'g') type = "gdbm"; 
        else if (*type == 'N' || *type == 'n') type = "ndbm"; 
        else if (*type == 'S' || *type == 's') type = "sdbm"; 
    }
    else usertype = 1;

    if (apr_atomic_inc32(&initialised)) {
        apr_atomic_set32(&initialised, 1); /* prevent wrap-around */

        while (apr_atomic_read32(&in_init)) /* wait until we get fully inited */
            ;
    }
    else {
        apr_pool_t *parent;

        /* Top level pool scope, need process-scope lifetime */
        for (parent = pool;  parent; parent = apr_pool_parent_get(pool))
             pool = parent;

        /* deprecate in 2.0 - permit implicit initialization */
        apu_dso_init(pool);

        drivers = apr_hash_make(pool);
        apr_hash_set(drivers, "sdbm", APR_HASH_KEY_STRING, &apr_dbm_type_sdbm);

        apr_pool_cleanup_register(pool, NULL, dbm_term,
                                  apr_pool_cleanup_null);

        apr_atomic_dec32(&in_init);
    }

    rv = apu_dso_mutex_lock();
    if (rv) {
        *vtable = NULL;
        return rv;
    }

    *vtable = apr_hash_get(drivers, type, APR_HASH_KEY_STRING);
    if (*vtable) {
        apu_dso_mutex_unlock();
        return APR_SUCCESS;
    }

    /* The driver DSO must have exactly the same lifetime as the
     * drivers hash table; ignore the passed-in pool */
    pool = apr_hash_pool_get(drivers);

#if defined(NETWARE)
    apr_snprintf(modname, sizeof(modname), "dbm%s.nlm", type);
#elif defined(WIN32)
    apr_snprintf(modname, sizeof(modname),
                 "apr_dbm_%s-" APU_STRINGIFY(APU_MAJOR_VERSION) ".dll", type);
#else
    apr_snprintf(modname, sizeof(modname),
                 "apr_dbm_%s-" APU_STRINGIFY(APU_MAJOR_VERSION) ".so", type);
#endif
    apr_snprintf(symname, sizeof(symname), "apr_dbm_type_%s", type);

    rv = apu_dso_load(NULL, &symbol, modname, symname, pool);
    if (rv == APR_SUCCESS || rv == APR_EINIT) { /* previously loaded?!? */
        *vtable = symbol;
        if (usertype)
            type = apr_pstrdup(pool, type);
        apr_hash_set(drivers, type, APR_HASH_KEY_STRING, *vtable);
        rv = APR_SUCCESS;
    }
    else
        *vtable = NULL;

    apu_dso_mutex_unlock();
    return rv;

#endif /* APU_DSO_BUILD */
}
Example #22
void *http_server(apr_thread_t* t, void* d)
{
	http_server_data_t *server_data = d;
	struct event_config *cfg = event_config_new();
	struct event_base *base = NULL;
	struct evhttp *http = NULL;
	int i = 0;

	event_config_set_flag(cfg, EVENT_BASE_FLAG_EPOLL_USE_CHANGELIST);

	base = event_base_new_with_config(cfg);
	if (!base)
	{
		LOG4C_ERROR(logger, "Couldn't create an event_base: exiting");
		goto done;
	}

	event_config_free(cfg);

	/* Create a new evhttp object to handle requests. */
	http = evhttp_new(base);
	if (!http)
	{
	    LOG4C_ERROR(logger, "couldn't create evhttp. Exiting.");
		goto done;
	}

	evhttp_add_server_alias(http, "TVersity-Virtual-STB");

	for (i = 0; i < server_data->num_handlers; i++)
	{
		evhttp_set_cb(http, server_data->handlers[i].uri, http_request_cb, &(server_data->handlers[i]));
	}

	//set up static file handler
	if (server_data->static_root)
	{
	    evhttp_set_gencb(http, static_files_cb, (void *) server_data->static_root);
	}

	struct evhttp_bound_socket *handle = evhttp_accept_socket_with_handle(http, server_data->sock);
	if (!handle)
	{
        LOG4C_ERROR(logger, "could not accept on socket");
		goto done;
	}

	server_base[server_data->server_index] = base;

	event_base_dispatch(base);

done:
    if (http)
    {
        evhttp_free(http);
    }
    if (base)
    {
        server_base[server_data->server_index] = NULL;
        event_base_free(base);
    }
    /* free server_data last: it is still needed above to clear server_base */
    if (server_data)
        free(server_data);

	apr_atomic_dec32(&num_servers);

	return NULL;
}
Example #23
File: fdqueue.c  Project: apresley/Jaxer
apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t *queue_info,
                                          apr_pool_t **recycled_pool)
{
    apr_status_t rv;

    *recycled_pool = NULL;

    /* Block if the count of idle workers is zero */
    if (queue_info->idlers == 0) {
        rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            return rv;
        }
        /* Re-check the idle worker count to guard against a
         * race condition.  Now that we're in the mutex-protected
         * region, one of two things may have happened:
         *   - If the idle worker count is still zero, the
         *     workers are all still busy, so it's safe to
         *     block on a condition variable.
         *   - If the idle worker count is nonzero, then a
         *     worker has become idle since the first check
         *     of queue_info->idlers above.  It's possible
         *     that the worker has also signaled the condition
         *     variable--and if so, the listener missed it
         *     because it wasn't yet blocked on the condition
         *     variable.  But if the idle worker count is
         *     now nonzero, it's safe for this function to
         *     return immediately.
         */
        if (queue_info->idlers == 0) {
            rv = apr_thread_cond_wait(queue_info->wait_for_idler,
                                  queue_info->idlers_mutex);
            if (rv != APR_SUCCESS) {
                apr_status_t rv2;
                rv2 = apr_thread_mutex_unlock(queue_info->idlers_mutex);
                if (rv2 != APR_SUCCESS) {
                    return rv2;
                }
                return rv;
            }
        }
        rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            return rv;
        }
    }

    /* Atomically decrement the idle worker count */
    apr_atomic_dec32(&(queue_info->idlers));

    /* Atomically pop a pool from the recycled list */
    for (;;) {
        struct recycled_pool *first_pool = queue_info->recycled_pools;
        if (first_pool == NULL) {
            break;
        }
        if (apr_atomic_casptr((volatile void**)&(queue_info->recycled_pools), first_pool->next,
                              first_pool) == first_pool) {
            *recycled_pool = first_pool->pool;
            break;
        }
    }

    if (queue_info->terminated) {
        return APR_EOF;
    }
    else {
        return APR_SUCCESS;
    }
}
Example #24
File: fdqueue.c  Project: Aimbot2/apache2
apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t *queue_info,
                                          apr_pool_t **recycled_pool)
{
    apr_status_t rv;

    *recycled_pool = NULL;

    /* Block if the count of idle workers is zero */
    if (queue_info->idlers == 0) {
        rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            return rv;
        }
        /* Re-check the idle worker count to guard against a
         * race condition.  Now that we're in the mutex-protected
         * region, one of two things may have happened:
         *   - If the idle worker count is still zero, the
         *     workers are all still busy, so it's safe to
         *     block on a condition variable, BUT
         *     we need to check for idle worker count again
         *     when we are signaled since it can happen that
         *     we are signaled by a worker thread that went idle
         *     but received a context switch before it could
         *     tell us. If it does signal us later once it is on
         *     CPU again there might be no idle worker left.
         *     See
         *     https://issues.apache.org/bugzilla/show_bug.cgi?id=45605#c4
         *   - If the idle worker count is nonzero, then a
         *     worker has become idle since the first check
         *     of queue_info->idlers above.  It's possible
         *     that the worker has also signaled the condition
         *     variable--and if so, the listener missed it
         *     because it wasn't yet blocked on the condition
         *     variable.  But if the idle worker count is
         *     now nonzero, it's safe for this function to
         *     return immediately.
         */
        while (queue_info->idlers == 0) {
            rv = apr_thread_cond_wait(queue_info->wait_for_idler,
                                  queue_info->idlers_mutex);
            if (rv != APR_SUCCESS) {
                apr_status_t rv2;
                rv2 = apr_thread_mutex_unlock(queue_info->idlers_mutex);
                if (rv2 != APR_SUCCESS) {
                    return rv2;
                }
                return rv;
            }
        }
        rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            return rv;
        }
    }

    /* Atomically decrement the idle worker count */
    apr_atomic_dec32(&(queue_info->idlers));

    /* Atomically pop a pool from the recycled list */

    /* This function is safe only as long as it is single threaded because
     * it reaches into the queue and accesses "next" which can change.
     * We are OK today because it is only called from the listener thread.
     * cas-based pushes do not have the same limitation - any number can
     * happen concurrently with a single cas-based pop.
     */

    for (;;) {
        struct recycled_pool *first_pool = queue_info->recycled_pools;
        if (first_pool == NULL) {
            break;
        }
        if (apr_atomic_casptr((void*)&(queue_info->recycled_pools), first_pool->next,
                              first_pool) == first_pool) {
            *recycled_pool = first_pool->pool;
            break;
        }
    }

    if (queue_info->terminated) {
        return APR_EOF;
    }
    else {
        return APR_SUCCESS;
    }
}
Example #25
File: port.c  Project: Ga-vin/apache
static apr_status_t impl_pollset_poll(apr_pollset_t *pollset,
                                      apr_interval_time_t timeout,
                                      apr_int32_t *num,
                                      const apr_pollfd_t **descriptors)
{
    apr_os_sock_t fd;
    int ret, i, j;
    unsigned int nget;
    pfd_elem_t *ep;
    apr_status_t rv = APR_SUCCESS;
    apr_pollfd_t fp;

    nget = 1;

    pollset_lock_rings();

    apr_atomic_inc32(&pollset->p->waiting);

    while (!APR_RING_EMPTY(&(pollset->p->add_ring), pfd_elem_t, link)) {
        ep = APR_RING_FIRST(&(pollset->p->add_ring));
        APR_RING_REMOVE(ep, link);

        if (ep->pfd.desc_type == APR_POLL_SOCKET) {
            fd = ep->pfd.desc.s->socketdes;
        }
        else {
            fd = ep->pfd.desc.f->filedes;
        }

        ret = port_associate(pollset->p->port_fd, PORT_SOURCE_FD, 
                             fd, get_event(ep->pfd.reqevents), ep);
        if (ret < 0) {
            rv = apr_get_netos_error();
            APR_RING_INSERT_TAIL(&(pollset->p->free_ring), ep, pfd_elem_t, link);
            break;
        }

        ep->on_query_ring = 1;
        APR_RING_INSERT_TAIL(&(pollset->p->query_ring), ep, pfd_elem_t, link);
    }

    pollset_unlock_rings();

    if (rv != APR_SUCCESS) {
        apr_atomic_dec32(&pollset->p->waiting);
        return rv;
    }

    rv = call_port_getn(pollset->p->port_fd, pollset->p->port_set, 
                        pollset->nalloc, &nget, timeout);

    /* decrease the waiting ASAP to reduce the window for calling 
       port_associate within apr_pollset_add() */
    apr_atomic_dec32(&pollset->p->waiting);

    (*num) = nget;
    if (nget) {

        pollset_lock_rings();

        for (i = 0, j = 0; i < nget; i++) {
            fp = (((pfd_elem_t*)(pollset->p->port_set[i].portev_user))->pfd);
            if ((pollset->flags & APR_POLLSET_WAKEABLE) &&
                fp.desc_type == APR_POLL_FILE &&
                fp.desc.f == pollset->wakeup_pipe[0]) {
                apr_pollset_drain_wakeup_pipe(pollset);
                rv = APR_EINTR;
            }
            else {
                pollset->p->result_set[j] = fp;            
                pollset->p->result_set[j].rtnevents =
                    get_revent(pollset->p->port_set[i].portev_events);

                /* If the ring element is still on the query ring, move it
                 * to the add ring for re-association with the event port
                 * later.  (It may have already been moved to the dead ring
                 * by a call to pollset_remove on another thread.)
                 */
                ep = (pfd_elem_t *)pollset->p->port_set[i].portev_user;
                if (ep->on_query_ring) {
                    APR_RING_REMOVE(ep, link);
                    ep->on_query_ring = 0;
                    APR_RING_INSERT_TAIL(&(pollset->p->add_ring), ep,
                                         pfd_elem_t, link);
                }
                j++;
            }
        }
        pollset_unlock_rings();
        if ((*num = j)) { /* any event besides wakeup pipe? */
            rv = APR_SUCCESS;
            if (descriptors) {
                *descriptors = pollset->p->result_set;
            }
        }
    }

    pollset_lock_rings();

    /* Shift all PFDs in the Dead Ring to the Free Ring */
    APR_RING_CONCAT(&(pollset->p->free_ring), &(pollset->p->dead_ring), pfd_elem_t, link);

    pollset_unlock_rings();

    return rv;
}
Example #26
static apr_status_t handle_response(serf_request_t *request,
                                    serf_bucket_t *response,
                                    void *handler_baton,
                                    apr_pool_t *pool)
{
    const char *data;
    apr_size_t len;
    serf_status_line sl;
    apr_status_t status;
    handler_baton_t *ctx = handler_baton;

    if (!response) {
        /* Oh no!  We've been cancelled! */
        abort();
    }

    status = serf_bucket_response_status(response, &sl);
    if (status) {
        if (APR_STATUS_IS_EAGAIN(status)) {
            return APR_SUCCESS;
        }
        abort();
    }

    while (1) {
        status = serf_bucket_read(response, 2048, &data, &len);

        if (SERF_BUCKET_READ_ERROR(status))
            return status;

        /*fwrite(data, 1, len, stdout);*/

        if (!ctx->hdr_read) {
            serf_bucket_t *hdrs;
            const char *val;

            printf("Processing %s\n", ctx->path);

            hdrs = serf_bucket_response_get_headers(response);
            val = serf_bucket_headers_get(hdrs, "Content-Type");
            /* FIXME: This check isn't quite right because Content-Type could
             * be decorated; ideally strcasestr would be correct.
             */
            if (val && strcasecmp(val, "text/html") == 0) {
                ctx->is_html = 1;
                apr_pool_create(&ctx->parser_pool, NULL);
                ctx->parser = apr_xml_parser_create(ctx->parser_pool);
            }
            else {
                ctx->is_html = 0;
            }
            ctx->hdr_read = 1;
        }
        if (ctx->is_html) {
            apr_status_t xs;

            xs = apr_xml_parser_feed(ctx->parser, data, len);
            /* Uh-oh. */
            if (xs) {
#ifdef SERF_VERBOSE
                printf("XML parser error (feed): %d\n", xs);
#endif
                ctx->is_html = 0;
            }
        }

        /* are we done yet? */
        if (APR_STATUS_IS_EOF(status)) {

            if (ctx->is_html) {
                apr_xml_doc *xmld;
                apr_status_t xs;
                doc_path_t *dup;

                xs = apr_xml_parser_done(ctx->parser, &xmld);
                if (xs) {
#ifdef SERF_VERBOSE
                    printf("XML parser error (done): %d\n", xs);
#endif
                    return xs;
                }
                dup = (doc_path_t*)
                    serf_bucket_mem_alloc(ctx->doc_queue_alloc,
                                          sizeof(doc_path_t));
                dup->doc = xmld;
                dup->path = (char*)serf_bucket_mem_alloc(ctx->doc_queue_alloc,
                                                         ctx->path_len);
                memcpy(dup->path, ctx->path, ctx->path_len);
                dup->pool = ctx->parser_pool;

                *(doc_path_t **)apr_array_push(ctx->doc_queue) = dup;

                apr_thread_cond_signal(ctx->doc_queue_condvar);
            }

            apr_atomic_dec32(ctx->requests_outstanding);
            serf_bucket_mem_free(ctx->allocator, ctx->path);
            if (ctx->query) {
                serf_bucket_mem_free(ctx->allocator, ctx->query);
                serf_bucket_mem_free(ctx->allocator, ctx->full_path);
            }
            if (ctx->fragment) {
                serf_bucket_mem_free(ctx->allocator, ctx->fragment);
            }
            serf_bucket_mem_free(ctx->allocator, ctx);
            return APR_EOF;
        }

        /* have we drained the response so far? */
        if (APR_STATUS_IS_EAGAIN(status))
            return APR_SUCCESS;

        /* loop to read some more. */
    }
    /* NOTREACHED */
}
Example #27
File: apr_dbd.c  Project: dtrebbien/apr
APR_DECLARE(apr_status_t) apr_dbd_init(apr_pool_t *pool)
{
    apr_status_t ret = APR_SUCCESS;
    apr_pool_t *parent;

    if (apr_atomic_inc32(&initialised)) {
        apr_atomic_set32(&initialised, 1); /* prevent wrap-around */

        while (apr_atomic_read32(&in_init)) /* wait until we get fully inited */
            ;

        return APR_SUCCESS;
    }

    /* Top level pool scope, need process-scope lifetime */
    for (parent = pool;  parent; parent = apr_pool_parent_get(pool))
         pool = parent;
#if APR_HAVE_MODULAR_DSO
    /* deprecate in 2.0 - permit implicit initialization */
    apu_dso_init(pool);
#endif

    drivers = apr_hash_make(pool);

#if APR_HAS_THREADS
    ret = apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, pool);
    /* This already registers a pool cleanup */
#endif

#if !APR_HAVE_MODULAR_DSO

    /* Load statically-linked drivers: */
#if APU_HAVE_MYSQL
    DRIVER_LOAD("mysql", apr_dbd_mysql_driver, pool);
#endif
#if APU_HAVE_PGSQL
    DRIVER_LOAD("pgsql", apr_dbd_pgsql_driver, pool);
#endif
#if APU_HAVE_SQLITE3
    DRIVER_LOAD("sqlite3", apr_dbd_sqlite3_driver, pool);
#endif
#if APU_HAVE_SQLITE2
    DRIVER_LOAD("sqlite2", apr_dbd_sqlite2_driver, pool);
#endif
#if APU_HAVE_ORACLE
    DRIVER_LOAD("oracle", apr_dbd_oracle_driver, pool);
#endif
#if APU_HAVE_FREETDS
    DRIVER_LOAD("freetds", apr_dbd_freetds_driver, pool);
#endif
#if APU_HAVE_ODBC
    DRIVER_LOAD("odbc", apr_dbd_odbc_driver, pool);
#endif
#if APU_HAVE_SOME_OTHER_BACKEND
    DRIVER_LOAD("firebird", apr_dbd_other_driver, pool);
#endif
#endif /* APR_HAVE_MODULAR_DSO */

    apr_pool_cleanup_register(pool, NULL, apr_dbd_term,
                              apr_pool_cleanup_null);

    apr_atomic_dec32(&in_init);

    return ret;
}