Example #1
static tb_bool_t tb_directory_walk_impl(tb_char_t const* path, tb_bool_t recursion, tb_bool_t prefix, tb_directory_walk_func_t func, tb_cpointer_t priv)
{
    // check
    tb_assert_and_check_return_val(path && func, tb_false);

    // last
    tb_long_t       last = tb_strlen(path) - 1;
    tb_assert_and_check_return_val(last >= 0, tb_false);

    // done 
    tb_bool_t       ok = tb_true;
    tb_char_t       temp[4096] = {0};
    DIR*            directory = tb_null;
    if ((directory = opendir(path)))
    {
        // walk
        struct dirent* item = tb_null;
        while ((item = readdir(directory)))
        {
            // check
            tb_assert_and_check_continue(item->d_reclen);

            // the item name (tb_strncpy stops at the first null, so d_reclen only bounds the copy)
            tb_char_t name[1024] = {0};
            tb_strncpy(name, item->d_name, tb_min(item->d_reclen, sizeof(name) - 1));
            if (tb_strcmp(name, ".") && tb_strcmp(name, ".."))
            {
                // the temp path
                tb_long_t n = tb_snprintf(temp, 4095, "%s%s%s", path, path[last] == '/'? "" : "/", name);
                if (n >= 0) temp[n] = '\0';

                // the file info
                tb_file_info_t info = {0};
                if (tb_file_info(temp, &info))
                {
                    // do callback
                    if (prefix) ok = func(temp, &info, priv);
                    tb_check_break(ok);

                    // walk to the next directory
                    if (info.type == TB_FILE_TYPE_DIRECTORY && recursion) ok = tb_directory_walk_impl(temp, recursion, prefix, func, priv);
                    tb_check_break(ok);
    
                    // do callback
                    if (!prefix) ok = func(temp, &info, priv);
                    tb_check_break(ok);
                }
            }
        }

        // exit directory
        closedir(directory);
    }

    // continue ?
    return ok;
}
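
A minimal usage sketch (the callback signature is inferred from the func(temp, &info, priv) call sites above; demo_walk_func and demo_walk are illustrative names, and the public wrapper around this static implementation is not shown in this excerpt, so the sketch calls it directly):

// a hypothetical walk callback; returning tb_false stops the walk (see tb_check_break(ok) above)
static tb_bool_t demo_walk_func(tb_char_t const* path, tb_file_info_t const* info, tb_cpointer_t priv)
{
    // count the visited entries via the user pointer
    tb_size_t* count = (tb_size_t*)priv;
    if (count) (*count)++;

    // continue walking
    return tb_true;
}

static tb_void_t demo_walk(tb_char_t const* root)
{
    // walk recursively (recursion = tb_true), visiting each entry before descending (prefix = tb_true)
    tb_size_t count = 0;
    if (tb_directory_walk_impl(root, tb_true, tb_true, demo_walk_func, &count))
        tb_trace_d("walked %lu entries under %s", count, root);
}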
Example #2
File: json.c  Project: ljx0305/tbox
static tb_bool_t tb_oc_json_writer_func_array(tb_oc_json_writer_t* writer, tb_object_ref_t object, tb_size_t level)
{
    // check
    tb_assert_and_check_return_val(writer && writer->stream, tb_false);

    // writ
    if (tb_oc_array_size(object))
    {
        // writ beg
        if (tb_stream_printf(writer->stream, "[") < 0) return tb_false;
        if (!tb_oc_writer_newline(writer->stream, writer->deflate)) return tb_false;

        // walk
        tb_for_all (tb_object_ref_t, item, tb_oc_array_itor(object))
        {
            // item
            if (item)
            {
                // func
                tb_oc_json_writer_func_t func = tb_oc_json_writer_func(item->type);
                tb_assert_and_check_continue(func);

                // writ tab
                if (item_itor != item_head)
                {
                    if (!tb_oc_writer_tab(writer->stream, writer->deflate, level)) return tb_false;
                    if (tb_stream_printf(writer->stream, ",") < 0) return tb_false;
                    if (!tb_oc_writer_tab(writer->stream, writer->deflate, 1)) return tb_false;
                }
                else if (!tb_oc_writer_tab(writer->stream, writer->deflate, level + 1)) return tb_false;

                // writ
                if (!func(writer, item, level + 1)) return tb_false;
            }
        }

        // writ end
        if (!tb_oc_writer_tab(writer->stream, writer->deflate, level)) return tb_false;
        if (tb_stream_printf(writer->stream, "]") < 0) return tb_false;
        if (!tb_oc_writer_newline(writer->stream, writer->deflate)) return tb_false;
    }
    else
    {
        // writ an empty array
        if (tb_stream_printf(writer->stream, "[]") < 0) return tb_false;
        if (!tb_oc_writer_newline(writer->stream, writer->deflate)) return tb_false;
    }

    // ok
    return tb_true;
}
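
Note that item_itor and item_head are not declared anywhere in this function: they are variables introduced by tbox's tb_for_all iteration macro for the loop variable item. The item_itor != item_head test therefore distinguishes the first element from all later ones, which is why the "," separator (with its surrounding tabs) is written before every element except the first, yielding valid JSON with no trailing comma.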
Example #3
File: dns.c  Project: siwuxian/xmake
/* //////////////////////////////////////////////////////////////////////////////////////
 * interfaces
 */
tb_bool_t tb_dns_init_env()
{
    // done
    tb_size_t   count = 0;
    tb_handle_t library = tb_dynamic_init("libresolv.dylib");
    if (library) 
    {
        // the res_ninit func
        tb_dns_res_ninit_func_t pres_ninit = (tb_dns_res_ninit_func_t)tb_dynamic_func(library, "res_9_ninit");
        if (pres_ninit)
        {
            // init state
            struct __res_state state;
            if (!pres_ninit(&state))
            {
                // walk it
                tb_size_t i = 0;
                for (i = 0; i < state.nscount; i++, count++)
                {
                    // the address
                    tb_char_t const* addr = inet_ntoa(state.nsaddr_list[i].sin_addr);
                    tb_assert_and_check_continue(addr);

                    // trace
                    tb_trace_d("addr: %s", addr);

                    // add address
                    tb_dns_server_add(addr);
                }
            }
        }
    }

    // ok
    return tb_true;
}
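
The cast at the tb_dynamic_func call assumes a function-pointer typedef compatible with libresolv's res_ninit; a sketch of its likely shape (the exact typedef lives elsewhere in this file):

// assumed shape: res_9_ninit returns 0 on success, which matches
// the if (!pres_ninit(&state)) check above
typedef tb_int_t (*tb_dns_res_ninit_func_t)(struct __res_state* state);

Also note that count tallies the added servers but is otherwise unused in this excerpt, and the function returns tb_true even when the library or symbol lookup fails, so this initialization is best-effort.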
Example #4
tb_void_t tb_aicp_loop_util(tb_aicp_ref_t aicp, tb_bool_t (*stop)(tb_cpointer_t priv), tb_cpointer_t priv)
{   
    // check
    tb_aicp_impl_t* impl = (tb_aicp_impl_t*)aicp;
    tb_assert_and_check_return(impl);
   
    // the ptor 
    tb_aicp_ptor_impl_t* ptor = impl->ptor;
    tb_assert_and_check_return(ptor && ptor->loop_spak);

    // the loop spak
    tb_long_t (*loop_spak)(tb_aicp_ptor_impl_t*, tb_handle_t, tb_aice_ref_t, tb_long_t) = ptor->loop_spak;

    // worker++
    tb_atomic_fetch_and_inc(&impl->work);

    // init loop
    tb_handle_t loop = ptor->loop_init? ptor->loop_init(ptor) : tb_null;
 
    // trace
    tb_trace_d("loop[%p]: init", loop);

    // spak ctime
    tb_cache_time_spak();

    // loop
    while (1)
    {
        // spak
        tb_aice_t   resp = {0};
        tb_long_t   ok = loop_spak(ptor, loop, &resp, -1);

        // spak ctime
        tb_cache_time_spak();

        // failed?
        tb_check_break(ok >= 0);

        // timeout?
        tb_check_continue(ok);

        // check aico
        tb_aico_impl_t* aico = (tb_aico_impl_t*)resp.aico;
        tb_assert_and_check_continue(aico);

        // trace
        tb_trace_d("loop[%p]: spak: code: %lu, aico: %p, state: %s: %ld", loop, resp.code, aico, aico? tb_state_cstr(tb_atomic_get(&aico->state)) : "null", ok);

        // pending? clear the state if this is not an accept event, or the accept failed
        tb_size_t state = TB_STATE_OPENED;
        state = (resp.code != TB_AICE_CODE_ACPT || resp.state != TB_STATE_OK)? tb_atomic_fetch_and_pset(&aico->state, TB_STATE_PENDING, state) : tb_atomic_get(&aico->state);

        // killed or killing?
        if (state == TB_STATE_KILLED || state == TB_STATE_KILLING)
        {
            // update the aice state 
            resp.state = TB_STATE_KILLED;

            // killing? update to the killed state
            tb_atomic_fetch_and_pset(&aico->state, TB_STATE_KILLING, TB_STATE_KILLED);
        }

        // done func, @note maybe the aico exit will be called
        if (resp.func && !resp.func(&resp)) 
        {
            // trace
#ifdef __tb_debug__
            tb_trace_e("loop[%p]: done aice func failed with code: %lu at line: %lu, func: %s, file: %s!", loop, resp.code, aico->line, aico->func, aico->file);
#else
            tb_trace_e("loop[%p]: done aice func failed with code: %lu!", loop, resp.code);
#endif
        }

        // killing? update to the killed state
        tb_atomic_fetch_and_pset(&aico->state, TB_STATE_KILLING, TB_STATE_KILLED);

        // stop it?
        if (stop && stop(priv)) tb_aicp_kill(aicp);
    }

    // exit loop
    if (ptor->loop_exit) ptor->loop_exit(ptor, loop);

    // worker--
    tb_atomic_fetch_and_dec(&impl->work);

    // trace
    tb_trace_d("loop[%p]: exit", loop);
}
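
A minimal caller sketch (demo_stop and demo_loop are illustrative; the flag behind priv would be set by another thread, e.g. a shutdown handler):

// a hypothetical stop predicate: stop once the flag behind priv becomes non-zero
static tb_bool_t demo_stop(tb_cpointer_t priv)
{
    return (priv && *((tb_bool_t const*)priv))? tb_true : tb_false;
}

static tb_void_t demo_loop(tb_aicp_ref_t aicp, tb_bool_t const* stopped)
{
    // drive the proactor loop on the calling thread; once demo_stop returns
    // tb_true the loop kills the aicp and the spak loop unwinds
    tb_aicp_loop_util(aicp, demo_stop, stopped);
}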
Example #5
File: thread_pool.c  Project: luxuan/tbox
static tb_thread_pool_job_t* tb_thread_pool_jobs_post_task(tb_thread_pool_impl_t* impl, tb_thread_pool_task_t const* task, tb_size_t* post_size)
{
    // check
    tb_assert_and_check_return_val(impl && task && task->done && post_size, tb_null);

    // done
    tb_bool_t               ok = tb_false;
    tb_thread_pool_job_t*   job = tb_null;
    do
    {
        // check
        tb_assert_and_check_break(tb_list_entry_size(&impl->jobs_waiting) + tb_list_entry_size(&impl->jobs_urgent) + 1 < TB_THREAD_POOL_JOBS_WAITING_MAXN);

        // make job
        job = (tb_thread_pool_job_t*)tb_fixed_pool_malloc0(impl->jobs_pool);
        tb_assert_and_check_break(job);

        // init job
        job->refn   = 1;
        job->state  = TB_STATE_WAITING;
        job->task   = *task;

        // non-urgent job? 
        if (!task->urgent)
        {
            // post to the waiting jobs
            tb_list_entry_insert_tail(&impl->jobs_waiting, &job->entry);
        }
        else
        {
            // post to the urgent jobs
            tb_list_entry_insert_tail(&impl->jobs_urgent, &job->entry);
        }

        // the waiting jobs count
        tb_size_t jobs_waiting_count = tb_list_entry_size(&impl->jobs_waiting) + tb_list_entry_size(&impl->jobs_urgent);
        tb_assert_and_check_break(jobs_waiting_count);

        // update the post size
        if (*post_size < impl->worker_size) (*post_size)++;

        // trace
        tb_trace_d("task[%p:%s]: post: %lu: ..", task->done, task->name, *post_size);

        // start more workers if there are fewer workers than waiting jobs
        if (impl->worker_size < jobs_waiting_count)
        {
            tb_size_t i = impl->worker_size;
            tb_size_t n = tb_min(jobs_waiting_count, impl->worker_maxn);
            for (; i < n; i++)
            {
                // the worker 
                tb_thread_pool_worker_t* worker = &impl->worker_list[i];

                // clear worker
                tb_memset(worker, 0, sizeof(tb_thread_pool_worker_t));

                // init worker
                worker->id          = i;
                worker->pool        = (tb_thread_pool_ref_t)impl;
                worker->loop        = tb_thread_init(__tb_lstring__("thread_pool"), tb_thread_pool_worker_loop, worker, impl->stack);
                tb_assert_and_check_continue(worker->loop);
            }

            // update the worker size
            impl->worker_size = i;
        }

        // ok
        ok = tb_true;
    
    } while (0);

    // failed?
    if (!ok)
    {
        // exit it
        tb_fixed_pool_free(impl->jobs_pool, job);
        job = tb_null;
    }

    // ok?
    return job;
}
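
For context, a sketch of posting through this helper. The field names (name, done, priv, urgent) follow the accesses above and in the worker loop below; demo_task_done and demo_post are illustrative, and the real public post API (with its locking around the job lists) is not shown in this excerpt:

// a hypothetical task body; the worker loop below invokes it as
// job->task.done((tb_thread_pool_worker_ref_t)worker, job->task.priv)
static tb_void_t demo_task_done(tb_thread_pool_worker_ref_t worker, tb_cpointer_t priv)
{
    tb_trace_d("demo task running: %p", priv);
}

static tb_void_t demo_post(tb_thread_pool_impl_t* impl)
{
    // fill only the fields this helper reads
    tb_thread_pool_task_t task = {0};
    tb_size_t             post_size = 0;
    task.name   = "demo";
    task.done   = demo_task_done;
    task.priv   = tb_null;
    task.urgent = tb_false;

    // on success the returned job has been queued on jobs_waiting (or jobs_urgent)
    if (tb_thread_pool_jobs_post_task(impl, &task, &post_size))
        tb_trace_d("posted, post_size: %lu", post_size);
}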
Example #6
File: thread_pool.c  Project: luxuan/tbox
static tb_pointer_t tb_thread_pool_worker_loop(tb_cpointer_t priv)
{
    // the worker
    tb_thread_pool_worker_t* worker = (tb_thread_pool_worker_t*)priv;

    // trace
    tb_trace_d("worker[%lu]: init", worker? worker->id : -1);

    // done
    do
    {
        // check
        tb_assert_and_check_break(worker && !worker->jobs && !worker->stats);

        // the pool
        tb_thread_pool_impl_t* impl = (tb_thread_pool_impl_t*)worker->pool;
        tb_assert_and_check_break(impl && impl->semaphore);

        // wait a little so the posting thread can leave the lock first
        tb_msleep((worker->id + 1)* 20);

        // init jobs
        worker->jobs = tb_vector_init(TB_THREAD_POOL_JOBS_WORKING_GROW, tb_element_ptr(tb_null, tb_null));
        tb_assert_and_check_break(worker->jobs);

        // init stats
        worker->stats = tb_hash_map_init(TB_HASH_MAP_BUCKET_SIZE_MICRO, tb_element_ptr(tb_null, tb_null), tb_element_mem(sizeof(tb_thread_pool_job_stats_t), tb_null, tb_null));
        tb_assert_and_check_break(worker->stats);
        
        // loop
        while (1)
        {
            // pull jobs if idle
            if (!tb_vector_size(worker->jobs))
            {
                // enter 
                tb_spinlock_enter(&impl->lock);

                // init the pull time
                worker->pull = 0;

                // pull from the urgent jobs
                if (tb_list_entry_size(&impl->jobs_urgent))
                {
                    // trace
                    tb_trace_d("worker[%lu]: try pulling from urgent: %lu", worker->id, tb_list_entry_size(&impl->jobs_urgent));

                    // pull it
                    tb_remove_if_until(tb_list_entry_itor(&impl->jobs_urgent), tb_thread_pool_worker_walk_pull, worker);
                }

                // pull from the waiting jobs
                if (tb_list_entry_size(&impl->jobs_waiting))
                {
                    // trace
                    tb_trace_d("worker[%lu]: try pulling from waiting: %lu", worker->id, tb_list_entry_size(&impl->jobs_waiting));

                    // pull it
                    tb_remove_if_until(tb_list_entry_itor(&impl->jobs_waiting), tb_thread_pool_worker_walk_pull, worker);
                }

                // pull from the pending jobs and clean some finished and killed jobs
                if (tb_list_entry_size(&impl->jobs_pending))
                {
                    // trace
                    tb_trace_d("worker[%lu]: try pulling from pending: %lu", worker->id, tb_list_entry_size(&impl->jobs_pending));

                    // no jobs? try to pull from the pending jobs
                    if (!tb_vector_size(worker->jobs))
                        tb_remove_if(tb_list_entry_itor(&impl->jobs_pending), tb_thread_pool_worker_walk_pull_and_clean, worker);
                    // clean some finished and killed jobs
                    else tb_remove_if(tb_list_entry_itor(&impl->jobs_pending), tb_thread_pool_worker_walk_clean, worker);
                }

                // leave 
                tb_spinlock_leave(&impl->lock);

                // idle? wait it
                if (!tb_vector_size(worker->jobs))
                {
                    // killed?
                    tb_check_break(!tb_atomic_get(&worker->bstoped));

                    // trace
                    tb_trace_d("worker[%lu]: wait: ..", worker->id);

                    // wait some time
                    tb_long_t wait = tb_semaphore_wait(impl->semaphore, -1);
                    tb_assert_and_check_break(wait > 0);

                    // trace
                    tb_trace_d("worker[%lu]: wait: ok", worker->id);

                    // continue it
                    continue;
                }
                else
                {
#ifdef TB_TRACE_DEBUG
                    // update the jobs urgent size
                    tb_size_t jobs_urgent_size = tb_list_entry_size(&impl->jobs_urgent);

                    // update the jobs waiting size
                    tb_size_t jobs_waiting_size = tb_list_entry_size(&impl->jobs_waiting);

                    // update the jobs pending size
                    tb_size_t jobs_pending_size = tb_list_entry_size(&impl->jobs_pending);

                    // trace
                    tb_trace_d("worker[%lu]: pull: jobs: %lu, time: %lu ms, waiting: %lu, pending: %lu, urgent: %lu", worker->id, tb_vector_size(worker->jobs), worker->pull, jobs_waiting_size, jobs_pending_size, jobs_urgent_size);
#endif
                }
            }

            // done jobs
            tb_for_all (tb_thread_pool_job_t*, job, worker->jobs)
            {
                // check
                tb_assert_and_check_continue(job && job->task.done);

                // the job state
                tb_size_t state = tb_atomic_fetch_and_pset(&job->state, TB_STATE_WAITING, TB_STATE_WORKING);
                
                // the job is waiting? work it
                if (state == TB_STATE_WAITING)
                {
                    // trace
                    tb_trace_d("worker[%lu]: done: task[%p:%s]: ..", worker->id, job->task.done, job->task.name);

                    // init the time
                    tb_hong_t time = tb_cache_time_spak();

                    // done the job
                    job->task.done((tb_thread_pool_worker_ref_t)worker, job->task.priv);

                    // compute the elapsed time
                    time = tb_cache_time_spak() - time;

                    // exists? update time and count
                    tb_size_t               itor;
                    tb_hash_map_item_ref_t  item = tb_null;
                    if (    ((itor = tb_hash_map_find(worker->stats, job->task.done)) != tb_iterator_tail(worker->stats))
                        &&  (item = (tb_hash_map_item_ref_t)tb_iterator_item(worker->stats, itor)))
                    {
                        // the stats
                        tb_thread_pool_job_stats_t* stats = (tb_thread_pool_job_stats_t*)item->data;
                        tb_assert_and_check_break(stats);

                        // update the done count
                        stats->done_count++;

                        // update the total time 
                        stats->total_time += time;
                    }
                    
                    // no item? add it
                    if (!item) 
                    {
                        // init stats
                        tb_thread_pool_job_stats_t stats = {0};
                        stats.done_count = 1;
                        stats.total_time = time;

                        // add stats
                        tb_hash_map_insert(worker->stats, job->task.done, &stats);
                    }

#ifdef TB_TRACE_DEBUG
                    tb_size_t done_count = 0;
                    tb_hize_t total_time = 0;
                    tb_thread_pool_job_stats_t* stats = (tb_thread_pool_job_stats_t*)tb_hash_map_get(worker->stats, job->task.done);
                    if (stats)
                    {
                        done_count = stats->done_count;
                        total_time = stats->total_time;
                    }

                    // trace (guard the average against a zero done_count)
                    tb_trace_d("worker[%lu]: done: task[%p:%s]: time: %lld ms, average: %lld ms, count: %lu", worker->id, job->task.done, job->task.name, time, done_count? (total_time / (tb_hize_t)done_count) : 0, done_count);
#endif

                    // update the job state
                    tb_atomic_set(&job->state, TB_STATE_FINISHED);
                }
                // the job is killing? work it
                else if (state == TB_STATE_KILLING)
                {
                    // update the job state
                    tb_atomic_set(&job->state, TB_STATE_KILLED);
                }
            }

            // clear jobs
            tb_vector_clear(worker->jobs);
        }

    } while (0);

    // exit worker
    if (worker)
    {
        // trace
        tb_trace_d("worker[%lu]: exit", worker->id);

        // stopped
        tb_atomic_set(&worker->bstoped, 1);

        // exit all private data
        tb_size_t i = 0;
        tb_size_t n = tb_arrayn(worker->priv);
        for (i = 0; i < n; i++)
        {
            // the private data
            tb_thread_pool_worker_priv_t* priv = &worker->priv[n - i - 1];

            // exit it
            if (priv->exit) priv->exit((tb_thread_pool_worker_ref_t)worker, priv->priv);

            // clear it
            priv->exit = tb_null;
            priv->priv = tb_null;
        }

        // exit stats
        if (worker->stats) tb_hash_map_exit(worker->stats);
        worker->stats = tb_null;

        // exit jobs
        if (worker->jobs) tb_vector_exit(worker->jobs);
        worker->jobs = tb_null;
    }

    // exit
    tb_thread_return(tb_null);
    return tb_null;
}
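
Taken together, the pull order in this loop encodes the pool's scheduling policy: urgent jobs are taken first, then waiting jobs, and the pending list is scanned only either to steal still-runnable work (when the worker came up empty) or to garbage-collect finished and killed entries. An idle worker then blocks on impl->semaphore instead of spinning, and on exit the teardown runs in reverse order of setup: per-worker private data (itself released back to front), then the stats map, then the jobs vector.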