/*
 * Compress a NULL-terminated array of node names into range syntax.
 *
 * elr->querypool is recycled for each call, so the compressed text is
 * duplicated onto the heap before the pool is cleared again.
 *
 * Returns a malloc'd string the caller must free(), or NULL if
 * range_compress() produced no result.
 */
char * range_easy_compress(easy_lr* elr, const char ** c_nodes)
{
    const char *compressed;
    char *retval = NULL;

    apr_pool_clear(elr->querypool);
    // FIXME copy/reference bits from real lr into easy_lr to expose warnings/errors
    compressed = range_compress(elr->lr, elr->querypool, c_nodes);
    /* BUGFIX: strdup(NULL) is undefined behavior; guard against a
     * failed/empty compression instead of crashing. */
    if (compressed != NULL) {
        retval = strdup(compressed);
    }
    apr_pool_clear(elr->querypool);
    return retval;
}
/*
 * Test helper: SELECT all rows from apr_dbd_test and fetch them in both
 * sequential mode (random access off, rownum -1 = "next row") and
 * random-access mode (random access on, rows fetched by explicit index),
 * asserting that exactly `count` rows come back.
 */
static void select_rows(abts_case *tc, apr_dbd_t* handle, const apr_dbd_driver_t* driver, int count)
{
    apr_status_t rv;
    apr_pool_t* pool = p;  /* file-global test pool */
    apr_pool_t* tpool;
    const char* sql = "SELECT * FROM apr_dbd_test ORDER BY col1";
    apr_dbd_results_t *res = NULL;
    apr_dbd_row_t *row = NULL;
    int i;

    /* Pass 1: sequential (non-random) result set. */
    rv = apr_dbd_select(driver, pool, handle, &res, sql, 0);
    ABTS_ASSERT(tc, sql, rv == APR_SUCCESS);
    ABTS_PTR_NOTNULL(tc, res);

    /* Per-row scratch pool, cleared after each fetch.
     * NOTE(review): tpool is never destroyed; it lives until `pool` does. */
    apr_pool_create(&tpool, pool);

    i = count;
    while (i > 0) {
        row = NULL;
        rv = apr_dbd_get_row(driver, pool, res, &row, -1);
        ABTS_ASSERT(tc, sql, rv == APR_SUCCESS);
        ABTS_PTR_NOTNULL(tc, row);
        apr_pool_clear(tpool);
        i--;
    }
    /* NOTE(review): i is necessarily 0 here (loop condition), so this
     * assert cannot fire; row presence is enforced inside the loop. */
    ABTS_ASSERT(tc, "Missing Rows!", i == 0);

    /* Pass 2: random-access result set; num_tuples is known up front. */
    res = NULL;
    i = count;
    rv = apr_dbd_select(driver, pool, handle, &res, sql, 1);
    ABTS_ASSERT(tc, sql, rv == APR_SUCCESS);
    ABTS_PTR_NOTNULL(tc, res);
    rv = apr_dbd_num_tuples(driver, res);
    ABTS_ASSERT(tc, "invalid row count", rv == count);

    while (i > 0) {
        row = NULL;
        rv = apr_dbd_get_row(driver, pool, res, &row, i);
        ABTS_ASSERT(tc, sql, rv == APR_SUCCESS);
        ABTS_PTR_NOTNULL(tc, row);
        apr_pool_clear(tpool);
        i--;
    }
    ABTS_ASSERT(tc, "Missing Rows!", i == 0);

    /* Seeking past the end of a random-access result set must fail. */
    rv = apr_dbd_get_row(driver, pool, res, &row, count+100);
    ABTS_ASSERT(tc, "If we overseek, get_row should return -1", rv == -1);
}
// Drop one active reference to the volatile APR pool.  When the last
// active reference is released, the pool's memory is reclaimed: a pool
// that has grown past its "full" threshold is destroyed outright,
// otherwise it is merely cleared so the memory can be reused.
void LLVolatileAPRPool::clearVolatileAPRPool()
{
    if(mNumActiveRef > 0)
    {
        mNumActiveRef--;
        if(mNumActiveRef < 1)
        {
            if(isFull())
            {
                mNumTotalRef = 0 ;

                //destroy the apr_pool.
                releaseAPRPool() ;
            }
            else
            {
                //This does not actually free the memory,
                //it just allows the pool to re-use this memory for the next allocation.
                apr_pool_clear(mPool) ;
            }
        }
    }
    else
    {
        // Unbalanced clear (more clears than gets) — always fatal.
        llassert_always(mNumActiveRef > 0) ;
    }

    //paranoia check if the pool is jammed.
    //will remove the check before going to release.
    llassert_always(mNumTotalRef < (FULL_VOLATILE_APR_POOL << 2)) ;
}
/*
 * Write a timestamped, printf-formatted message to the configured log(s).
 *
 * Messages at LOGLEVEL_NOISE are additionally written (without the date
 * prefix) to the stderr log file.  The scratch pool cfg->errorlog_p is
 * recycled on every call, so the formatted message only lives until the
 * next logging_log() invocation.
 */
void logging_log(config_t *cfg, loglevel_e level, const char *fmt, ...)
{
    va_list ap;
    char date[APR_RFC822_DATE_LEN];
    struct iovec vec[4];
    apr_size_t blen;

    if (cfg->loglevel < level)
        return;

    va_start(ap, fmt);
    apr_pool_clear(cfg->errorlog_p);
    apr_rfc822_date(date, apr_time_now());
    vec[0].iov_base = date;
    vec[0].iov_len = APR_RFC822_DATE_LEN - 1;  /* drop the NUL */
    vec[1].iov_base = " ";
    /* BUGFIX: was 2, which also wrote the string's NUL terminator into
     * the log; the separator is a single space. */
    vec[1].iov_len = 1;
    vec[2].iov_base = apr_pvsprintf(cfg->errorlog_p, fmt, ap);
    vec[2].iov_len = strlen(vec[2].iov_base);
    vec[3].iov_base = "\n";
    vec[3].iov_len = 1;
    /* NOISE: message + newline only (vec[2..3]) to the stderr log */
    if (level == LOGLEVEL_NOISE) {
        apr_file_writev(cfg->errorlog_fperr, &vec[2], 2, &blen);
    }
    if (cfg->loglevel > LOGLEVEL_NONE && cfg->errorlog_fp) {
        apr_file_writev(cfg->errorlog_fp, vec, 4, &blen);
    }
    va_end(ap);
}
/*
 * Evaluate a range expression and return it in compressed form.
 *
 * The returned string is allocated from elr->querypool, which is recycled
 * at the start of every call — callers must consume or copy the result
 * before invoking another range_easy_* function on the same handle.
 */
const char * range_easy_eval(easy_lr* elr, const char * c_range)
{
    struct range_request* req;

    apr_pool_clear(elr->querypool);
    // FIXME copy/reference bits from real lr into easy_lr to expose warnings/errors
    req = range_expand(elr->lr, elr->querypool, c_range);

    return range_request_compressed(req);
}
/*
 * This function assumes that either ctx->buffered_bb == NULL, or
 * ctx->buffered_bb is empty, or ctx->buffered_bb == bb
 *
 * Sets aside any buckets still left in bb so they survive until the next
 * invocation of the core output filter; when nothing is left, the
 * deferred-write pool is recycled.
 */
static void setaside_remaining_output(ap_filter_t *f,
                                      core_output_filter_ctx_t *ctx,
                                      apr_bucket_brigade *bb, conn_rec *c)
{
    if (bb == NULL) {
        return;
    }
    remove_empty_buckets(bb);
    if (!APR_BRIGADE_EMPTY(bb)) {
        /* flag that buffered output is pending on this connection */
        c->data_in_output_filters = 1;
        if (bb != ctx->buffered_bb) {
            /* lazily create the long-lived pool that holds set-aside
             * bucket data across filter invocations */
            if (!ctx->deferred_write_pool) {
                apr_pool_create(&ctx->deferred_write_pool, c->pool);
                apr_pool_tag(ctx->deferred_write_pool, "deferred_write");
            }
            ap_save_brigade(f, &(ctx->buffered_bb), &bb,
                            ctx->deferred_write_pool);
        }
    }
    else if (ctx->deferred_write_pool) {
        /*
         * There are no more requests in the pipeline. We can just clear the
         * pool.
         */
        apr_pool_clear(ctx->deferred_write_pool);
    }
}
/*
 * Tear down an h2_io belonging to mplx m: shut down its input, optionally
 * drain pending events, detach it from the stream/ready sets and destroy
 * it.  The io's pool is cleared and kept as the mplx's single spare pool
 * for reuse by the next stream.
 */
static void io_destroy(h2_mplx *m, h2_io *io, int events)
{
    apr_pool_t *pool = io->pool;

    /* cleanup any buffered input */
    h2_io_in_shutdown(io);
    if (events) {
        /* Process outstanding events before destruction */
        io_process_events(m, io);
    }

    /* detach the pool before destroying io so it can be recycled below */
    io->pool = NULL;
    /* The pool is cleared/destroyed which also closes all
     * allocated file handles. Give this count back to our
     * file handle pool. */
    m->tx_handles_reserved += io->files_handles_owned;

    h2_io_set_remove(m->stream_ios, io);
    h2_io_set_remove(m->ready_ios, io);
    h2_io_destroy(io);

    if (pool) {
        apr_pool_clear(pool);
        /* keep at most one spare pool; replace any existing one */
        if (m->spare_pool) {
            apr_pool_destroy(m->spare_pool);
        }
        m->spare_pool = pool;
    }
    check_tx_free(m);
}
/*
 * Avahi entry-group state callback for a published service.
 *
 * On a name collision, an alternative service name is chosen and the
 * service is re-registered under it; registration failures are logged.
 */
static void service_callback(AVAHI_GCC_UNUSED AvahiEntryGroup *g, AvahiEntryGroupState state, void *userdata)
{
    struct service_data *j = userdata;

    switch (state) {
        case AVAHI_ENTRY_GROUP_UNCOMMITED:
        case AVAHI_ENTRY_GROUP_REGISTERING:
        case AVAHI_ENTRY_GROUP_ESTABLISHED:
            break;

        case AVAHI_ENTRY_GROUP_COLLISION: {
            char *n;
            ap_assert(j->chosen_name);
            n = avahi_alternative_service_name(j->chosen_name);
            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, j->runtime->main_server,
                         "Name collision on '%s', changing to '%s'",
                         j->chosen_name, n);
            /* chosen_name lives in j->pool, so copy the new name in
             * only after the pool has been cleared */
            apr_pool_clear(j->pool);
            j->chosen_name = apr_pstrdup(j->pool, n);
            /* BUGFIX: avahi_alternative_service_name() returns malloc'd
             * memory owned by the caller — release it with avahi_free() */
            avahi_free(n);
            create_service(j);
            break;
        }

        case AVAHI_ENTRY_GROUP_FAILURE:
            ap_log_error(APLOG_MARK, APLOG_ERR, 0, j->runtime->main_server,
                         "Failed to register service: %s",
                         avahi_strerror(avahi_client_errno(j->runtime->client)));
            break;
    }
}
/** Nothing - apache will take care.
    XXX with jk pools we can implement 'recycling',
    not sure what's the equivalent for apache */
static void jk2_pool_apr_reset(jk_env_t *env, jk_pool_t *p)
{
#ifdef JK_APR_POOL_DEBUG
    /* BUGFIX: passing a pointer for "%#lx" is undefined behavior;
     * use the dedicated %p conversion with a void* cast instead. */
    fprintf(stderr, "apr_reset %p\n", (void *)p);
#endif
    /* clear (not destroy): releases allocations, keeps the pool usable */
    apr_pool_clear(p->_private);
}
/*++

LogFormatV

    Adds an entry to the log file.

Arguments:
    level   - Log severity level.

    format  - Pointer to a buffer containing a printf-style format string.

    argList - Argument list to insert into 'format'.

Return Values:
    None.

Remarks:
    This function could be called before the logging subsystem is initialized.

--*/
void LogFormatV(apr_uint32_t level, const char *format, va_list argList)
{
    apr_time_exp_t now;
    char *message;

    ASSERT(format != NULL);

    if (level <= maxLevel && handle != NULL) {
        // Write local time
        apr_time_exp_lt(&now, apr_time_now());
        // BUGFIX: apr_time_exp_t.tm_mon is 0-11 (as in struct tm), so the
        // month must be printed as tm_mon+1; tm_year is years since 1900.
        apr_file_printf(handle, "%04d-%02d-%02d %02d:%02d:%02d - ",
            now.tm_year+1900, now.tm_mon+1, now.tm_mday,
            now.tm_hour, now.tm_min, now.tm_sec);

        // Format and write log message
        message = apr_pvsprintf(msgPool, format, argList);
        if (message == NULL) {
            message = "Unable to format message." APR_EOL_STR;
        }
        apr_file_puts(message, handle);
        apr_file_flush(handle);

        // Clear memory allocated when formatting the message
        apr_pool_clear(msgPool);
    }
}
/*
 * Echo loop: read from the socket and write the data straight back until
 * EOF, a zero-length transfer, or an error.  A private subpool holds the
 * transfer buffer and is torn down before returning.
 */
static apr_status_t talkTalk(apr_socket_t *socket, apr_pool_t *parent)
{
    apr_pool_t *pool;
    apr_size_t len;
    char *buf;
    apr_status_t rv;

    if (apr_pool_create(&pool, parent) != APR_SUCCESS)
        return APR_ENOPOOL;

    buf = apr_palloc(pool, BUF_SIZE);
    if (!buf) {
        /* BUGFIX: don't leak the subpool on the error path */
        apr_pool_destroy(pool);
        return ENOMEM; /* NOTE(review): native errno; APR_ENOMEM may be
                        * preferable — confirm callers' expectations */
    }

    do {
        len = BUF_SIZE;
        rv = apr_socket_recv(socket, buf, &len);
        if (APR_STATUS_IS_EOF(rv) || len == 0 || rv != APR_SUCCESS)
            break;
        rv = apr_socket_send(socket, buf, &len);
        if (len == 0 || rv != APR_SUCCESS)
            break;
    } while (rv == APR_SUCCESS);

    /* BUGFIX: destroy (not merely clear) the subpool; a cleared subpool
     * stays registered on the parent and accumulates per connection. */
    apr_pool_destroy(pool);

    return APR_SUCCESS;
}
/* RTSP connection disconnected: unlink it from the server's connection
 * list and either request termination of its remaining sessions (the
 * connection is destroyed once they are gone) or destroy it immediately
 * when no sessions remain. */
static apt_bool_t rtsp_server_on_disconnect(apt_net_server_task_t *task, apt_net_server_connection_t *connection)
{
    apr_size_t remaining_sessions = 0;
    rtsp_server_t *server = apt_net_server_task_object_get(task);
    rtsp_server_connection_t *rtsp_connection = connection->obj;

    apt_list_elem_remove(server->connection_list,rtsp_connection->it);
    rtsp_connection->it = NULL;
    if(apt_list_is_empty(server->connection_list) == TRUE) {
        /* last connection gone: recycle the sub-pool the list lived in */
        apr_pool_clear(server->sub_pool);
        server->connection_list = NULL;
    }

    remaining_sessions = apr_hash_count(rtsp_connection->session_table);
    if(remaining_sessions) {
        rtsp_server_session_t *session;
        void *val;
        apr_hash_index_t *it;
        /* BUGFIX: remaining_sessions is apr_size_t; "%d" is the wrong
         * conversion on 64-bit platforms — use APR_SIZE_T_FMT. */
        apt_log(APT_LOG_MARK,APT_PRIO_NOTICE,
                "Terminate Remaining RTSP Sessions [%"APR_SIZE_T_FMT"]",
                remaining_sessions);
        it = apr_hash_first(connection->pool,rtsp_connection->session_table);
        for(; it; it = apr_hash_next(it)) {
            apr_hash_this(it,NULL,NULL,&val);
            session = val;
            if(session && session->terminating == FALSE) {
                rtsp_server_session_terminate_request(server,session);
            }
        }
    }
    else {
        apt_net_server_connection_destroy(connection);
    }
    return TRUE;
}
void mpm_recycle_completion_context(PCOMP_CONTEXT context)
{
    /* Recycle the completion context.
     * - clear the ptrans pool
     * - put the context on the queue to be consumed by the accept thread
     * Note:
     * context->accept_socket may be in a disconnected but reusable
     * state so -don't- close it.
     */
    if (context) {
        apr_pool_clear(context->ptrans);
        /* the bucket allocator lived in ptrans and died with the clear —
         * recreate it before the context is reused */
        context->ba = apr_bucket_alloc_create(context->ptrans);
        context->next = NULL;
        ResetEvent(context->Overlapped.hEvent);
        apr_thread_mutex_lock(qlock);
        if (qtail) {
            qtail->next = context;
        } else {
            qhead = context;
            /* queue transitioned empty -> non-empty: wake the waiter */
            SetEvent(qwait_event);
        }
        qtail = context;
        apr_thread_mutex_unlock(qlock);
    }
}
/*
 * Integration test: run the term query "text:can" (and a clone of it)
 * against the index under index_dir, verifying query naming/cloning,
 * string rendering, document frequency and the ids of the 9 hits.
 */
static void test_term_query_impl( CuTest* tc, const char *index_dir )
{
    apr_pool_t* pool, *cp;
    lcn_searcher_t* searcher;
    unsigned int doc_freq, i;
    lcn_term_t* term;
    char* query_str;
    lcn_query_t* query, *clone;
    lcn_hits_t* hits;

    apr_pool_create( &pool, main_pool );
    apr_pool_create( &cp, pool );  /* per-document scratch pool */

    LCN_TEST( lcn_term_create( &term, "text", "can", 1, pool ) );
    LCN_TEST( lcn_term_query_create( &query, term, pool ) );
    lcn_query_set_name( query, "a named query" );
    CuAssertStrEquals( tc, "a named query", lcn_query_name( query ));
    /* a clone must carry the name over */
    LCN_TEST( lcn_query_clone( query, &clone, pool ) );
    CuAssertStrEquals( tc, "a named query", lcn_query_name( clone ));
    LCN_TEST( lcn_query_to_string( query, &query_str, "", pool ) );
    CuAssertStrEquals( tc, query_str, "text:can" );
    LCN_TEST( lcn_index_searcher_create_by_path( &searcher, index_dir, pool ) );
    CuAssertIntEquals( tc, 331, lcn_searcher_max_doc( searcher ) );
    LCN_TEST( lcn_searcher_doc_freq( searcher, term, &doc_freq ) );
    CuAssertIntEquals( tc, 9, doc_freq );
    /* search with the clone to prove it behaves like the original */
    LCN_TEST( lcn_searcher_search( searcher, &hits, clone, NULL, pool ) );
    CuAssertIntEquals( tc, 9, lcn_hits_length( hits ) );

    for( i = 0; i < 9; i++ )
    {
        lcn_document_t* doc;
        char* id, *text;

        /* recycle the scratch pool for each hit */
        apr_pool_clear( cp );
        LCN_TEST( lcn_hits_doc( hits, &doc, i, cp ) );
        LCN_TEST( lcn_document_get( doc, &id, "id", cp ) );
        LCN_TEST( lcn_document_get( doc, &text, "text", cp ) );
        CuAssertStrEquals( tc, test_ids[i], id );
    }

    apr_pool_destroy( pool );
}
/*
 * Expand "cluster:section" from the cluster's nodes.cf file.
 *
 * Parsed section maps are cached per-libcrange (keyed by cluster file
 * path) and invalidated by mtime comparison.  VIPS/VIPHOSTS sections are
 * delegated to their dedicated expanders.  Unknown clusters or sections
 * produce a warning and an empty range.
 */
static range* _expand_cluster(range_request* rr, const char* cluster, const char* section)
{
    struct stat st;
    const char* res;
    libcrange* lr = range_request_lr(rr);
    set* cache = libcrange_get_cache(lr, "nodescf:cluster_keys");
    apr_pool_t* req_pool = range_request_pool(rr);
    apr_pool_t* lr_pool = range_request_lr_pool(rr);
    const char* cluster_file;
    cache_entry* e;

    /* special sections handled by dedicated expanders */
    if (strcmp(section, "VIPS") == 0)
        return _cluster_vips(rr, cluster);
    if (strcmp(section, "VIPHOSTS") == 0)
        return _cluster_viphosts(rr, cluster);

    cluster_file = apr_psprintf(req_pool, "%s/%s/nodes.cf", nodescf_path, cluster);

    if (!cache) {
        /* first use: create the library-lifetime cache */
        cache = set_new(lr_pool, 0);
        libcrange_set_cache(lr, "nodescf:cluster_keys", cache);
    }

    if (stat(cluster_file, &st) == -1) {
        range_request_warn_type(rr, "NOCLUSTERDEF", cluster);
        return range_new(rr);
    }

    e = set_get_data(cache, cluster_file);
    if (!e) {
        /* not cached yet: parse the file into its own subpool */
        e = apr_palloc(lr_pool, sizeof(struct cache_entry));
        apr_pool_create(&e->pool, lr_pool);
        e->sections = _cluster_keys(rr, e->pool, cluster, cluster_file);
        e->mtime = st.st_mtime;
        set_add(cache, cluster_file, e);
    }
    else {
        time_t cached_mtime = e->mtime;
        if (cached_mtime != st.st_mtime) {
            /* stale: recycle the entry's pool and re-parse */
            apr_pool_clear(e->pool);
            e->sections = _cluster_keys(rr, e->pool, cluster, cluster_file);
            e->mtime = st.st_mtime;
        }
    }

    res = set_get_data(e->sections, section);
    if (!res) {
        char* cluster_section = apr_psprintf(req_pool, "%s:%s", cluster, section);
        range_request_warn_type(rr, "NOCLUSTER", cluster_section);
        return range_new(rr);
    }

    return do_range_expand(rr, res);
}
/*
 * Win9x accept path: (lazily) allocate a completion context with its own
 * transaction pool, then block on the job queue until an accepted socket
 * arrives; fill in local/remote addresses and wrap the socket as an APR
 * socket.  Returns NULL on shutdown (INVALID_SOCKET popped from queue).
 */
static PCOMP_CONTEXT win9x_get_connection(PCOMP_CONTEXT context)
{
    apr_os_sock_info_t sockinfo;
    int len, salen;
#if APR_HAVE_IPV6
    salen = sizeof(struct sockaddr_in6);
#else
    salen = sizeof(struct sockaddr_in);
#endif

    if (context == NULL) {
        /* allocate the completion context and the transaction pool */
        apr_allocator_t *allocator;
        apr_thread_mutex_lock(child_lock);
        context = apr_pcalloc(pchild, sizeof(COMP_CONTEXT));
        apr_allocator_create(&allocator);
        apr_allocator_max_free_set(allocator, ap_max_mem_free);
        apr_pool_create_ex(&context->ptrans, pchild, NULL, allocator);
        apr_allocator_owner_set(allocator, context->ptrans);
        apr_pool_tag(context->ptrans, "transaction");
        apr_thread_mutex_unlock(child_lock);
    }

    while (1) {
        /* recycle the transaction pool (and its bucket allocator) */
        apr_pool_clear(context->ptrans);
        context->ba = apr_bucket_alloc_create(context->ptrans);
        context->accept_socket = remove_job();
        if (context->accept_socket == INVALID_SOCKET) {
            return NULL;
        }
        len = salen;
        context->sa_server = apr_palloc(context->ptrans, len);
        if (getsockname(context->accept_socket,
                        context->sa_server, &len) == SOCKET_ERROR) {
            ap_log_error(APLOG_MARK, APLOG_WARNING, apr_get_netos_error(),
                         ap_server_conf, "getsockname failed");
            continue;
        }
        len = salen;
        context->sa_client = apr_palloc(context->ptrans, len);
        if ((getpeername(context->accept_socket,
                         context->sa_client, &len)) == SOCKET_ERROR) {
            ap_log_error(APLOG_MARK, APLOG_WARNING, apr_get_netos_error(),
                         ap_server_conf, "getpeername failed");
            /* BUGFIX: the old memset(&context->sa_client, ...) zeroed the
             * *pointer* (sizeof a pointer), not the sockaddr it points to —
             * clear the address structure itself instead. */
            memset(context->sa_client, '\0', salen);
        }
        sockinfo.os_sock = &context->accept_socket;
        sockinfo.local  = context->sa_server;
        sockinfo.remote = context->sa_client;
        sockinfo.family = context->sa_server->sa_family;
        sockinfo.type   = SOCK_STREAM;
        apr_os_sock_make(&context->sock, &sockinfo, context->ptrans);

        return context;
    }
}
/* Reset a mag_conn for reuse: run its destructor, wipe every field, but
 * keep the (cleared) pool attached for the next use. */
static void mag_conn_clear(struct mag_conn *mc)
{
    (void)mag_conn_destroy(mc);
    apr_pool_t *temp;

    /* clear, don't destroy: the pool's memory is recycled.
     * NOTE(review): assumes mag_conn_destroy() leaves mc->pool valid —
     * confirm against its implementation. */
    apr_pool_clear(mc->pool);
    temp = mc->pool;
    /* zero the whole struct, then restore the pool pointer */
    memset(mc, 0, sizeof(struct mag_conn));
    mc->pool = temp;
}
/*
 * Reset an nsvn handle for reuse: discard any pending Subversion error
 * and recycle the handle's APR pool.  A NULL handle is passed through
 * untouched.  Returns the handle it was given.
 */
nsvn_t*
nsvn_base_clear (nsvn_t *nsvn)
{
  if (nsvn == NULL)
    return nsvn;

  svn_error_clear (nsvn->err);
  apr_pool_clear (nsvn->pool);
  return nsvn;
}
///////////////////////////////////////////////////////////////////////////////// // // Function: // // Purpose: // // Parameters: // // Return value: // // Author: Komatsu Yuji(Zheng Chuyu) // ///////////////////////////////////////////////////////////////////////////////// void jhklog_close(void) { if (jx_log == NULL) return; if (jx_log->fp != NULL) apr_file_close(jx_log->fp); jx_log->fp = NULL; apr_pool_clear(jx_log->pool); }
/* Destroy RTSP connection: unlink it from the client's connection list and
 * disconnect its transport; once the last connection is gone, recycle the
 * sub-pool that backed the list. */
static apt_bool_t rtsp_client_connection_destroy(rtsp_client_t *client, rtsp_client_connection_t *rtsp_connection)
{
    apt_list_elem_remove(client->connection_list, rtsp_connection->it);
    apt_net_client_disconnect(client->task, rtsp_connection->base);

    if (apt_list_is_empty(client->connection_list) != TRUE) {
        return TRUE;
    }

    /* no connections left: reclaim the sub-pool and drop the list */
    apr_pool_clear(client->sub_pool);
    client->connection_list = NULL;
    return TRUE;
}
/*
 * Set up (once) and (re)arm the SSL listener socket for this input module:
 * create/bind a non-blocking TCP socket, start listening and register it
 * with the module's pollset.  On any failure the socket is closed, the
 * module input pool is recycled and the exception is rethrown.
 */
static void im_ssl_listen(nx_module_t *module)
{
    nx_im_ssl_conf_t *imconf;
    nx_exception_t e;

    imconf = (nx_im_ssl_conf_t *) module->config;

    try
    {
        /* listensock is created lazily and kept across calls */
        if ( imconf->listensock == NULL )
        {
            apr_sockaddr_t *sa;

            CHECKERR_MSG(apr_socket_create(&(imconf->listensock), APR_INET, SOCK_STREAM,
                                           APR_PROTO_TCP, module->input.pool),
                         "couldn't create tcp socket");
            CHECKERR_MSG(apr_sockaddr_info_get(&sa, imconf->host, APR_INET,
                                               imconf->port, 0, module->input.pool),
                         "apr_sockaddr_info failed for %s:%d",
                         imconf->host, imconf->port);
            /* non-blocking + zero timeout so accept never stalls the loop */
            CHECKERR_MSG(apr_socket_opt_set(imconf->listensock, APR_SO_NONBLOCK, 1),
                         "couldn't set SO_NONBLOCK on listen socket");
            CHECKERR_MSG(apr_socket_timeout_set(imconf->listensock, 0),
                         "couldn't set socket timeout on listen socket");
            CHECKERR_MSG(apr_socket_opt_set(imconf->listensock, APR_SO_REUSEADDR, 1),
                         "couldn't set SO_REUSEADDR on listen socket");
            CHECKERR_MSG(apr_socket_opt_set(imconf->listensock, APR_TCP_NODELAY, 1),
                         "couldn't set TCP_NODELAY on listen socket");
            CHECKERR_MSG(apr_socket_bind(imconf->listensock, sa),
                         "couldn't bind ssl socket to %s:%d",
                         imconf->host, imconf->port);
        }
        else
        {
            log_debug("ssl socket already initialized");
        }

        CHECKERR_MSG(apr_socket_listen(imconf->listensock, SOMAXCONN),
                     "couldn't listen to ssl socket on %s:%d",
                     imconf->host, imconf->port);
        nx_module_pollset_add_socket(module, imconf->listensock,
                                     APR_POLLIN | APR_POLLHUP);
    }
    catch(e)
    {
        /* tear down the half-initialized socket before propagating */
        if ( imconf->listensock != NULL )
        {
            apr_socket_close(imconf->listensock);
            imconf->listensock = NULL;
        }
        apr_pool_clear(module->input.pool);
        rethrow(e);
    }
}
/* Tear down a cache: evict every entry (everything between the sentinel's
 * own links), close the heap and the hash table, then reclaim all pool
 * memory in one go. */
void cacheDestroy (cache * c, request_data *rd)
{				/*{{{ */
  /* the list is circular: empty when the sentinel points at itself */
  for (;;)
    {
      if (c->sentinel->down == c->sentinel)
	break;
      listremoveitem (c, c->sentinel->down, rd);
    }

  cacheheap_heapclose (c->heap);
  entrytable_close (c->htable);
  apr_pool_clear (c->pool);
}				/*}}} */
/*
 * Worker loop for seeding: pop commands off the shared queue and, per
 * command, seed (render the enclosing metatile under a lock), transfer a
 * metatile's tiles to another tileset's cache, or delete a tile.  The
 * per-thread pool is recycled at the top of every iteration.
 * NOTE(review): the matching #if for the #endif below (fork vs. thread
 * entry signature) is outside this view.
 */
static void* APR_THREAD_FUNC seed_thread(apr_thread_t *thread, void *data)
#endif
{
  mapcache_tile *tile;
  mapcache_context seed_ctx = ctx;  /* thread-private copy of the context */
  seed_ctx.log = seed_log;
  apr_pool_create(&seed_ctx.pool,ctx.pool);
  tile = mapcache_tileset_tile_create(ctx.pool, tileset, grid_link);
  tile->dimensions = dimensions;
  while(1) {
    struct seed_cmd cmd;
    apr_status_t ret;
    /* recycle per-iteration allocations */
    apr_pool_clear(seed_ctx.pool);

    ret = pop_queue(&cmd);
    if(ret != APR_SUCCESS || cmd.command == MAPCACHE_CMD_STOP) break;
    tile->x = cmd.x;
    tile->y = cmd.y;
    tile->z = cmd.z;
    if(cmd.command == MAPCACHE_CMD_SEED) {
      /* aquire a lock on the metatile ?*/
      mapcache_metatile *mt = mapcache_tileset_metatile_get(&seed_ctx, tile);
      int isLocked = mapcache_lock_or_wait_for_resource(&seed_ctx, mapcache_tileset_metatile_resource_key(&seed_ctx,mt));
      if(isLocked == MAPCACHE_TRUE) {
        /* this will query the source to create the tiles, and save them to the cache */
        mapcache_tileset_render_metatile(&seed_ctx, mt);
        mapcache_unlock_resource(&seed_ctx, mapcache_tileset_metatile_resource_key(&seed_ctx,mt));
      }
    } else if (cmd.command == MAPCACHE_CMD_TRANSFER) {
      /* copy every tile of the metatile into the transfer tileset's cache */
      int i;
      mapcache_metatile *mt = mapcache_tileset_metatile_get(&seed_ctx, tile);
      for (i = 0; i < mt->ntiles; i++) {
        mapcache_tile *subtile = &mt->tiles[i];
        mapcache_tileset_tile_get(&seed_ctx, subtile);
        subtile->tileset = tileset_transfer;
        tileset_transfer->cache->tile_set(&seed_ctx, subtile);
      }
    } else { //CMD_DELETE
      mapcache_tileset_tile_delete(&seed_ctx,tile,MAPCACHE_TRUE);
    }
    if(seed_ctx.get_error(&seed_ctx)) {
      error_detected++;
      ctx.log(&ctx,MAPCACHE_INFO,seed_ctx.get_error_message(&seed_ctx));
    }
  }
#ifdef USE_FORK
  return 0;
#else
  apr_thread_exit(thread,MAPCACHE_SUCCESS);
  return NULL;
#endif
}
/*
 * Consumer thread: wait on the shared condvar for parsed-document work,
 * pop one doc/path pair off the queue, extract its hrefs, release the
 * pair's resources, and exit when no requests remain outstanding.  A
 * subpool scoped to this thread is recycled every iteration.
 */
static void * APR_THREAD_FUNC parser_thread(apr_thread_t *thread, void *data)
{
    apr_status_t status;
    apr_pool_t *pool, *subpool;
    parser_baton_t *ctx;

    ctx = (parser_baton_t*)data;
    pool = apr_thread_pool_get(thread);

    apr_pool_create(&subpool, pool);

    while (1) {
        doc_path_t *dup;

        /* recycle per-iteration allocations */
        apr_pool_clear(subpool);

        /* Grab it. */
        apr_thread_mutex_lock(ctx->mutex);
        /* Sleep. */
        /* NOTE(review): waits before checking the queue — assumes the
         * producer signals under the mutex for every push; confirm the
         * push side, otherwise a wakeup could be missed. */
        apr_thread_cond_wait(ctx->condvar, ctx->mutex);

        /* Fetch the doc off the list. */
        if (ctx->doc_queue->nelts) {
            dup = *(doc_path_t**)(apr_array_pop(ctx->doc_queue));
            /* dup = (ctx->doc_queue->conns->elts)[0]; */
        }
        else {
            dup = NULL;
        }

        /* Don't need the mutex now. */
        apr_thread_mutex_unlock(ctx->mutex);

        /* Parse the doc/url pair. */
        if (dup) {
            status = find_href_doc(dup->doc, dup->path, ctx, subpool);
            if (status) {
                printf("Error finding hrefs: %d %s\n", status, dup->path);
            }
            /* Free the doc pair and its pool. */
            apr_pool_destroy(dup->pool);
            serf_bucket_mem_free(ctx->doc_queue_alloc, dup->path);
            serf_bucket_mem_free(ctx->doc_queue_alloc, dup);
        }

        /* Hey are we done? */
        if (!apr_atomic_read32(ctx->requests_outstanding)) {
            break;
        }
    }

    return NULL;
}
/*
 * abts test driver: parse command-line flags (-v verbose, -x exclude,
 * -l list, -q quiet), optionally build a list of tests to run, then run
 * every registered suite and report the results.
 */
int main(int argc, const char *const argv[]) {
    int i;
    int rv;
    int list_provided = 0;
    abts_suite *suite = NULL;

    initialize();

    quiet = !isatty(STDOUT_FILENO);

    for (i = 1; i < argc; i++) {
        if (!strcmp(argv[i], "-v")) {
            verbose = 1;
            continue;
        }
        if (!strcmp(argv[i], "-x")) {
            exclude = 1;
            continue;
        }
        if (!strcmp(argv[i], "-l")) {
            list_tests = 1;
            continue;
        }
        if (!strcmp(argv[i], "-q")) {
            quiet = 1;
            continue;
        }
        if (argv[i][0] == '-') {
            fprintf(stderr, "Invalid option: `%s'\n", argv[i]);
            exit(1);
        }
        /* any non-flag argument means an explicit test list was given */
        list_provided = 1;
    }

    if (list_provided) {
        /* Waste a little space here, because it is easier than counting the
         * number of tests listed.  Besides it is at most three char *.
         */
        /* NOTE(review): flag arguments (e.g. "-v") are copied into
         * testlist too; presumably the matcher ignores them — confirm. */
        testlist = calloc(argc + 1, sizeof(char *));
        for (i = 1; i < argc; i++) {
            testlist[i - 1] = argv[i];
        }
    }

    /* run every suite, recycling the global pool between suites */
    for (i = 0; i < (sizeof(alltests) / sizeof(struct testlist *)); i++) {
        suite = alltests[i].func(suite);
        apr_pool_clear(p);
    }

    rv = report(suite);
    return rv;
}
/*
 * Test: search "titel:second" in index_writer/index_10 and verify the
 * single hit's binary "text" field round-trips with its exact 4-byte
 * content (including the non-ASCII byte \326).
 */
static void test_omit_norms( CuTest* tc )
{
    apr_pool_t* pool, *cp;
    lcn_searcher_t* searcher;
    unsigned int doc_freq;
    lcn_term_t* term;
    lcn_query_t* query;
    lcn_hits_t* hits;

    apr_pool_create( &pool, main_pool );
    apr_pool_create( &cp, pool );  /* scratch pool for document access */

    LCN_TEST( lcn_term_create( &term, "titel", "second", 1, pool ) );
    LCN_TEST( lcn_term_query_create( &query, term, pool ) );
    LCN_TEST( lcn_index_searcher_create_by_path( &searcher, "index_writer/index_10", pool ) );
    CuAssertIntEquals( tc, 5, lcn_searcher_max_doc( searcher ) );
    LCN_TEST( lcn_searcher_doc_freq( searcher, term, &doc_freq ) );
    CuAssertIntEquals( tc, 1, doc_freq );
    LCN_TEST( lcn_searcher_search( searcher, &hits, query, NULL, pool ) );
    CuAssertIntEquals( tc, 1, lcn_hits_length( hits ) );

    {
        lcn_document_t* doc;
        const char *value;
        lcn_field_t *field;
        char val[10];

        apr_pool_clear( cp );
        LCN_TEST( lcn_hits_doc( hits, &doc, 0, cp ) );
        LCN_TEST( lcn_document_get_field( doc, "text", &field ) );
        CuAssertIntEquals( tc, 4, lcn_field_size( field ));
        value = lcn_field_value( field );
        /* field value is not NUL-terminated: copy and terminate manually */
        memcpy( val, value, lcn_field_size( field ));
        val[ lcn_field_size( field ) ] = '\0';
        CuAssertStrEquals( tc, "123\326", val );
    }

    apr_pool_destroy( pool );
}
/* Reset every published service for this runtime: withdraw its Avahi
 * entry group, recycle its pool, and forget the name that was chosen
 * for it (a fresh one is picked on re-registration). */
static void reset_services(struct runtime_data *r) {
    struct service_data *j;

    ap_assert(r);

    for (j = r->services; j != NULL; j = j->next) {
        if (j->group != NULL)
            avahi_entry_group_reset(j->group);

        if (j->pool != NULL)
            apr_pool_clear(j->pool);

        j->chosen_name = NULL;
    }
}
/*
 * Parse an htgroup-style file and build a table (group name -> "in") of
 * every group that contains `user`.  Lines look like "group: member ...";
 * '#' lines and blank lines are skipped.  A subpool is recycled per line
 * for word-splitting scratch allocations.
 */
static apr_status_t groups_for_user(apr_pool_t *p, char *user, char *grpfile,
                                    apr_table_t ** out)
{
    ap_configfile_t *f;
    apr_table_t *grps = apr_table_make(p, 15);
    apr_pool_t *sp;
    struct ap_varbuf vb;
    const char *group_name, *ll, *w;
    apr_status_t status;
    apr_size_t group_len;

    if ((status = ap_pcfg_openfile(&f, p, grpfile)) != APR_SUCCESS) {
        return status ;
    }

    apr_pool_create(&sp, p);
    ap_varbuf_init(p, &vb, VARBUF_INIT_LEN);

    while (!(ap_varbuf_cfg_getline(&vb, f, VARBUF_MAX_LEN))) {
        /* skip comments and empty lines */
        if ((vb.buf[0] == '#') || (!vb.buf[0])) {
            continue;
        }
        ll = vb.buf;
        apr_pool_clear(sp);

        group_name = ap_getword(sp, &ll, ':');
        group_len = strlen(group_name);
        /* strip trailing whitespace from the group name */
        while (group_len && apr_isspace(*(group_name + group_len - 1))) {
            --group_len;
        }

        while (ll[0]) {
            w = ap_getword_conf(sp, &ll);
            if (!strcmp(w, user)) {
                /* copy the name into the caller's pool; sp is recycled */
                apr_table_setn(grps, apr_pstrmemdup(p, group_name, group_len),
                               "in");
                break;
            }
        }
    }
    ap_cfg_closefile(f);
    apr_pool_destroy(sp);
    ap_varbuf_free(&vb);
    *out = grps;
    return APR_SUCCESS;
}
/*
 * Drive the mock HTTP server and the serf client event loops in lockstep
 * until every request in handler_ctx[] is done, a read error occurs, the
 * mock server reports a test failure, or a 15-second deadline passes.
 * An iteration pool is recycled each time around the loop.
 */
apr_status_t
run_client_and_mock_servers_loops(test_baton_t *tb,
                                  int num_requests,
                                  handler_baton_t handler_ctx[],
                                  apr_pool_t *pool)
{
    apr_pool_t *iter_pool;
    int i, done = 0;
    MockHTTP *mh = tb->mh;
    apr_status_t status;
    apr_time_t finish_time = apr_time_now() + apr_time_from_sec(15);

    apr_pool_create(&iter_pool, pool);

    while (!done) {
        mhError_t err;

        apr_pool_clear(iter_pool);

        /* run server event loop */
        err = mhRunServerLoop(mh);

        /* Even if the mock server returned an error, it may have written
           something to the client. So process that data first, handle the
           error later. */

        /* run client event loop */
        status = serf_context_run(tb->context, 0, iter_pool);
        if (!APR_STATUS_IS_TIMEUP(status) &&
            SERF_BUCKET_READ_ERROR(status))
            return status;

        /* done only when every request's handler has finished */
        done = 1;
        for (i = 0; i < num_requests; i++)
            done &= handler_ctx[i].done;

        if (!done && (apr_time_now() > finish_time))
            return APR_ETIMEDOUT;

        /* server error handled last — see comment above */
        if (err == MOCKHTTP_TEST_FAILED)
            return SERF_ERROR_ISSUE_IN_TESTSUITE;
    }
    apr_pool_destroy(iter_pool);

    return APR_SUCCESS;
}
/*
 * Suite registration for the pool-userdata tests.  Note the pool is
 * deliberately cleared before post_pool_clear so that test can verify
 * userdata does not survive apr_pool_clear().
 */
abts_suite *testud(abts_suite *suite)
{
    /* no semicolon: the ADD_SUITE macro supplies its own */
    suite = ADD_SUITE(suite)

    apr_pool_create(&pool, p);
    testdata = apr_pstrdup(pool, "This is a test\n");

    abts_run_test(suite, set_userdata, NULL);
    abts_run_test(suite, get_userdata, NULL);
    abts_run_test(suite, get_nonexistkey, NULL);

    apr_pool_clear(pool);
    abts_run_test(suite, post_pool_clear, NULL);

    return suite;
}