// The hash-table migration thread (assoc_maintenance_thread in assoc.c) calls switch_item_lock_type
// to make every worker thread switch to either the segment-level (granular) lock or the global lock.
void switch_item_lock_type(enum item_lock_types type) { // workers receive the 'l'/'g' byte in thread_libevent_process
    char buf[1];
    int i;

    switch (type) {
        case ITEM_LOCK_GRANULAR:
            buf[0] = 'l'; // 'l' stands for ITEM_LOCK_GRANULAR, the segment-level lock
            break;
        case ITEM_LOCK_GLOBAL:
            buf[0] = 'g'; // 'g' stands for ITEM_LOCK_GLOBAL, the global lock
            break;
        default:
            fprintf(stderr, "Unknown lock type: %d\n", type);
            assert(1 == 0);
            break;
    }

    pthread_mutex_lock(&init_lock);
    init_count = 0;
    // Notify each worker thread by writing one byte into the pipe it listens on.
    for (i = 0; i < settings.num_threads; i++) {
        if (write(threads[i].notify_send_fd, buf, 1) != 1) {
            perror("Failed writing to notify pipe");
            /* TODO: This is a fatal problem. Can it ever happen temporarily? */
        }
    }
    // Wait until every worker thread has switched to the lock type given by `type`.
    wait_for_thread_registration(settings.num_threads);
    pthread_mutex_unlock(&init_lock);
}
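
// For context: a minimal sketch of the wait/notify pair that the call above relies on, modeled
// on memcached's thread.c (the exact code differs between versions). The caller of
// wait_for_thread_registration() already holds init_lock and sleeps on init_cond until
// init_count reaches nthreads; each worker calls register_thread_initialized() once it has
// finished the requested switch. Newer memcached versions also make the register function
// block briefly on worker_hang_lock (see the pause_threads example further down).
static void wait_for_thread_registration(int nthreads) {
    while (init_count < nthreads) {
        pthread_cond_wait(&init_cond, &init_lock);  /* init_lock is held by the caller */
    }
}

static void register_thread_initialized(void) {
    pthread_mutex_lock(&init_lock);
    init_count++;                      /* one more worker has completed the switch */
    pthread_cond_signal(&init_cond);
    pthread_mutex_unlock(&init_lock);
}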
Example #3
// Initialize the worker pool
struct zhw_worker_pool_t *zhw_worker_pool_init(void)
{
    struct zhw_worker_pool_t *p = calloc(1, sizeof(struct zhw_worker_pool_t));
    if(NULL == p) {
        ZHW_LOG_ERR("CALLOC FAIL");
        return p;
    }
    p->cache_task = zhw_cache_create(32, sizeof(struct zhw_worker_pool_task_t));
    const long cpu_num = sysconf(_SC_NPROCESSORS_ONLN);
    /* Use at least 32 workers; on machines with more than 32 online CPUs, use cpu_num + 1. */
    const long worker_num = cpu_num > 32 ? (cpu_num + 1) : 32;
    p->workers = calloc(worker_num, sizeof(struct zhw_worker_t));
    if(NULL == p->workers) {
        ZHW_LOG_ERR("CALLOC FAIL");
        free(p);
        p = NULL;
        return p;
    }
    ZHW_LOG_INFO("cpu_num: %ld, worker_num: %ld", cpu_num, worker_num);
    long i;
    pthread_mutex_init(&p->init_lock, NULL);
    pthread_cond_init(&p->init_cond, NULL);
    pthread_mutex_init(&p->tasks.lock, NULL);
    pthread_cond_init(&p->tasks.cond, NULL);
    p->worker_num = worker_num;
    for(i = 0; i < worker_num; i++) {
        p->workers[i].id = i;
        p->workers[i].pool = p;
        create_worker_thread(worker_thread_loop, &p->workers[i]);
    }
    wait_for_thread_registration(p);
    return p;
}
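
// A sketch of what wait_for_thread_registration(p) might look like for this pool. Unlike the
// memcached variants in the other examples, it takes the pool pointer; the counter field name
// started_num is hypothetical, since the struct definition is not shown here. Each worker would
// increment the counter and signal init_cond once its thread loop is ready.
static void wait_for_thread_registration(struct zhw_worker_pool_t *p)
{
    pthread_mutex_lock(&p->init_lock);
    while (p->started_num < p->worker_num) {   /* started_num: hypothetical "workers ready" count */
        pthread_cond_wait(&p->init_cond, &p->init_lock);
    }
    pthread_mutex_unlock(&p->init_lock);
}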
Example #4
extern RET_T ss_create_worker_threads(size_t thread_num, P_THREAD_OBJ threads)
{
    int i = 0;

    pthread_attr_t  attr;
    int ret = 0;

    pthread_mutex_init(&init_lock, NULL);
    pthread_cond_init(&init_cond, NULL);

    for (i=0; i < thread_num; ++i)
    {
        int fds[2];
        if (pipe(fds)) {
            SYS_ABORT("Can't create notify pipe");
        }

        threads[i].notify_receive_fd = fds[0];
        threads[i].notify_send_fd = fds[1];

        threads[i].base = event_init();
        if (! threads[i].base ){
            SYS_ABORT("Can't allocate event base");
        }
        threads[i].p_notify_event = event_new(threads[i].base, threads[i].notify_receive_fd,
                                             EV_READ | EV_PERSIST, thread_process, &threads[i]);

        if (! threads[i].p_notify_event ){
            SYS_ABORT("Can't allocate new event");
        }

        if (event_add(threads[i].p_notify_event, 0) == -1) {
            SYS_ABORT("Can't monitor libevent notify pipe");
        }

        slist_init(&threads[i].conn_queue);
        pthread_mutex_init(&threads[i].q_lock, NULL); 

        pthread_attr_init(&attr);
        if ((ret = pthread_create(&threads[i].thread_id, &attr, thread_run, (void*)(&threads[i]))) != 0) 
        {
             SYS_ABORT("Cannot create worker thread %s", strerror(ret));
        }
    }


    pthread_mutex_lock(&init_lock);
    wait_for_thread_registration(thread_num);
    pthread_mutex_unlock(&init_lock);

    /* FIXME: declared to return RET_T, but no value is returned here. */
}
Example #5
/* Must not be called with any deeper locks held */
void pause_threads(enum pause_thread_types type) {
    char buf[1];
    int i;

    buf[0] = 0;
    switch (type) {
        case PAUSE_ALL_THREADS:
            lru_maintainer_pause();
            slabs_rebalancer_pause();
            lru_crawler_pause();
#ifdef EXTSTORE
            storage_compact_pause();
#endif
            /* fall through: also pause the worker threads */
        case PAUSE_WORKER_THREADS:
            buf[0] = 'p';
            pthread_mutex_lock(&worker_hang_lock);
            break;
        case RESUME_ALL_THREADS:
            lru_maintainer_resume();
            slabs_rebalancer_resume();
            lru_crawler_resume();
#ifdef EXTSTORE
            storage_compact_resume();
#endif
            /* fall through: also resume the worker threads */
        case RESUME_WORKER_THREADS:
            pthread_mutex_unlock(&worker_hang_lock);
            break;
        default:
            fprintf(stderr, "Unknown lock type: %d\n", type);
            assert(1 == 0);
            break;
    }

    /* Only send a message if we have one. */
    if (buf[0] == 0) {
        return;
    }

    pthread_mutex_lock(&init_lock);
    init_count = 0;
    for (i = 0; i < settings.num_threads; i++) {
        if (write(threads[i].notify_send_fd, buf, 1) != 1) {
            perror("Failed writing to notify pipe");
            /* TODO: This is a fatal problem. Can it ever happen temporarily? */
        }
    }
    wait_for_thread_registration(settings.num_threads);
    pthread_mutex_unlock(&init_lock);
}
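
// Worker-side counterpart (sketch based on memcached's thread.c; details vary by version):
// the pipe callback reads the 'p' byte and calls register_thread_initialized(), which counts
// the thread in and then "piles up" on worker_hang_lock, so every worker stays blocked until
// pause_threads(RESUME_WORKER_THREADS / RESUME_ALL_THREADS) unlocks that mutex.
static void register_thread_initialized(void) {
    pthread_mutex_lock(&init_lock);
    init_count++;
    pthread_cond_signal(&init_cond);
    pthread_mutex_unlock(&init_lock);
    /* Force worker threads to pile up if someone wants us to */
    pthread_mutex_lock(&worker_hang_lock);
    pthread_mutex_unlock(&worker_hang_lock);
}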
Example #6
/*
 * Initializes the thread subsystem, creating various worker threads.
 *
 * nthreads  Number of worker event handler threads to spawn
 * main_base Event base for main thread
 */
void thread_init(int nthreads, struct event_base *main_base)
{
    int         i;
    int         power;

    pthread_mutex_init(&cache_lock, NULL);
    pthread_mutex_init(&init_lock, NULL);
    pthread_cond_init(&init_cond, NULL);

    pthread_mutex_init(&cqi_freelist_lock, NULL);
    cqi_freelist = NULL;

    threads = (LIBEVENT_THREAD *)calloc(nthreads, sizeof(LIBEVENT_THREAD));
    if (! threads)
    {
        log_debug(LOG_ERR, "Can't allocate thread descriptors, error:%s\n", strerror(errno));
        exit(1);
    }

    dispatcher_thread.base = main_base;
    dispatcher_thread.thread_id = pthread_self();

    for (i = 0; i < nthreads; i++)
    {
        int fds[2];
        if (pipe(fds))
        {
            log_debug(LOG_ERR, "Can't create notify pipe, error:%s\n", strerror(errno));
            exit(1);
        }

        threads[i].notify_receive_fd = fds[0];
        threads[i].notify_send_fd = fds[1];

        setup_thread(&threads[i]);
        /* Reserve three fds for the libevent base, and two for the pipe */
    }

    /* Create threads after we've done all the libevent setup. */
    for (i = 0; i < nthreads; i++)
    {
        create_worker(worker_libevent, &threads[i]);
    }

    /* Wait for all the threads to set themselves up before returning. */
    pthread_mutex_lock(&init_lock);
    wait_for_thread_registration(nthreads);
    pthread_mutex_unlock(&init_lock);
}
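
// For reference, a sketch of the create_worker() helper used above, following the shape it has
// in memcached's thread.c (treat it as an approximation, not necessarily this fork's exact code):
// a thin wrapper around pthread_create that aborts on failure.
static void create_worker(void *(*func)(void *), void *arg) {
    pthread_t       thread;
    pthread_attr_t  attr;
    int             ret;

    pthread_attr_init(&attr);
    if ((ret = pthread_create(&thread, &attr, func, arg)) != 0) {
        fprintf(stderr, "Can't create thread: %s\n", strerror(ret));
        exit(1);
    }
}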
Example #7
void thread_init(struct event_base *main_base, int nthreads, pthread_t *th)
{
    int i;

    pthread_mutex_init(&init_lock, NULL);
    pthread_cond_init(&init_cond, NULL);

    pthread_mutex_init(&cqi_freelist_lock, NULL);
    cqi_freelist = NULL;

    dispatcher_thread.base = main_base;
    dispatcher_thread.thread_id = pthread_self();

    threads = (LIBEVENT_THREAD *)calloc(nthreads, sizeof(LIBEVENT_THREAD));
    if (NULL == threads) {
        mfatal("allocate threads failed!");
        exit(1);
    }

    for (i = 0; i < nthreads; i++) {
        int fds[2];
        if (pipe(fds)) {
            mfatal("can't create notify pipe!");
            exit(1);
        }

        threads[i].notify_receive_fd = fds[0];
        threads[i].notify_send_fd = fds[1];

        setup_thread(&threads[i]);
    }

    for (i = 0; i < nthreads; i++) {
        create_worker(worker_libevent, &threads[i], th + i);
    }

    pthread_mutex_lock(&init_lock);
    wait_for_thread_registration(nthreads);
    pthread_mutex_unlock(&init_lock);

    num_threads = nthreads;
}
Example #8
/*
 * Initializes the thread subsystem, creating various worker threads.
 *
 * nthreads  Number of worker event handler threads to spawn
 */
void memcached_thread_init(int nthreads, void *arg) {
    int         i;
    int         power;

    for (i = 0; i < POWER_LARGEST; i++) {
        pthread_mutex_init(&lru_locks[i], NULL);
    }
    pthread_mutex_init(&worker_hang_lock, NULL);

    pthread_mutex_init(&init_lock, NULL);
    pthread_cond_init(&init_cond, NULL);

    pthread_mutex_init(&cqi_freelist_lock, NULL);
    cqi_freelist = NULL;

    /* Want a wide lock table, but don't waste memory */
    if (nthreads < 3) {
        power = 10;
    } else if (nthreads < 4) {
        power = 11;
    } else if (nthreads < 5) {
        power = 12;
    } else if (nthreads <= 10) {
        power = 13;
    } else if (nthreads <= 20) {
        power = 14;
    } else {
        /* 32k buckets. just under the hashpower default. */
        power = 15;
    }

    if (power >= hashpower) {
        fprintf(stderr, "Hash table power size (%d) cannot be equal to or less than item lock table (%d)\n", hashpower, power);
        fprintf(stderr, "Item lock table grows with `-t N` (worker threadcount)\n");
        fprintf(stderr, "Hash table grows with `-o hashpower=N` \n");
        exit(1);
    }

    item_lock_count = hashsize(power);
    item_lock_hashpower = power;

    item_locks = calloc(item_lock_count, sizeof(pthread_mutex_t));
    if (! item_locks) {
        perror("Can't allocate item locks");
        exit(1);
    }
    for (i = 0; i < item_lock_count; i++) {
        pthread_mutex_init(&item_locks[i], NULL);
    }

    threads = calloc(nthreads, sizeof(LIBEVENT_THREAD));
    if (! threads) {
        perror("Can't allocate thread descriptors");
        exit(1);
    }

    for (i = 0; i < nthreads; i++) {
        int fds[2];
        if (pipe(fds)) {
            perror("Can't create notify pipe");
            exit(1);
        }

        threads[i].notify_receive_fd = fds[0];
        threads[i].notify_send_fd = fds[1];

        setup_thread(&threads[i]);
        /* Reserve three fds for the libevent base, and two for the pipe */
        stats_state.reserved_fds += 5;
    }

    /* Create threads after we've done all the libevent setup. */
    for (i = 0; i < nthreads; i++) {
        create_worker(worker_libevent, &threads[i]);
    }

    /* Wait for all the threads to set themselves up before returning. */
    pthread_mutex_lock(&init_lock);
    wait_for_thread_registration(nthreads);
    pthread_mutex_unlock(&init_lock);
}
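
// The lock-table sizing above is easier to follow next to the helpers it feeds. In memcached,
// hashsize(n) is 2^n and an item lock is chosen by masking the item's hash value, so several
// hash-table buckets map onto one mutex. A sketch (memcached wraps the pthread call in a
// mutex_lock() macro; the direct call is used here to keep the sketch self-contained):
#define hashsize(n) ((unsigned long int)1 << (n))
#define hashmask(n) (hashsize(n) - 1)

void item_lock(uint32_t hv) {
    pthread_mutex_lock(&item_locks[hv & hashmask(item_lock_hashpower)]);
}

void item_unlock(uint32_t hv) {
    pthread_mutex_unlock(&item_locks[hv & hashmask(item_lock_hashpower)]);
}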
// nthreads is the number of worker threads; main_base is the main thread's event_base.
// The main thread calls this function from main() to create nthreads worker threads.
void thread_init(int nthreads, struct event_base *main_base) {
    int         i;
    int         power;

    // A lock must be taken when allocating a CQ_ITEM
    pthread_mutex_init(&cache_lock, NULL);
    pthread_mutex_init(&stats_lock, NULL);

    pthread_mutex_init(&init_lock, NULL);
    pthread_cond_init(&init_cond, NULL);

    pthread_mutex_init(&cqi_freelist_lock, NULL);
    cqi_freelist = NULL;

    /* Want a wide lock table, but don't waste memory */
    if (nthreads < 3) {
        power = 10;
    } else if (nthreads < 4) {
        power = 11;
    } else if (nthreads < 5) {
        power = 12;
    } else {
        /* 8192 buckets, and central locks don't scale much past 5 threads */
        power = 13;
    }

    item_lock_count = hashsize(power);
    item_lock_hashpower = power;

    // Segment-level locks for the hash table: rather than one lock per bucket, several buckets share one lock.
    item_locks = calloc(item_lock_count, sizeof(pthread_mutex_t));
    if (! item_locks) {
        perror("Can't allocate item locks");
        exit(1);
    }
    for (i = 0; i < item_lock_count; i++) {
        pthread_mutex_init(&item_locks[i], NULL);
    }
    pthread_key_create(&item_lock_type_key, NULL);
    pthread_mutex_init(&item_global_lock, NULL);

    // Allocate an array of nthreads LIBEVENT_THREAD structures
    threads = calloc(nthreads, sizeof(LIBEVENT_THREAD));
    if (! threads) {
        perror("Can't allocate thread descriptors");
        exit(1);
    }

    // State belonging to the main (dispatcher) thread
    dispatcher_thread.base = main_base;
    dispatcher_thread.thread_id = pthread_self();

    // State for the worker threads
    for (i = 0; i < nthreads; i++) {
        int fds[2];
        // Give each worker thread a pipe that is used to notify it
        if (pipe(fds)) {
            perror("Can't create notify pipe");
            exit(1);
        }

        threads[i].notify_receive_fd = fds[0];
        threads[i].notify_send_fd = fds[1];
        // Give each thread its own event_base and register an event that watches
        // notify_receive_fd for reads; also allocate a conn_queue for the thread.
        setup_thread(&threads[i]);
        /* Reserve three fds for the libevent base, and two for the pipe */
        stats.reserved_fds += 5;
    }

    /* Create threads after we've done all the libevent setup. */
    for (i = 0; i < nthreads; i++) {
        // Create the thread; the thread function is worker_libevent and its argument is &threads[i]
        create_worker(worker_libevent, &threads[i]);
    }
    
    /* Wait for all the threads to set themselves up before returning. */
    pthread_mutex_lock(&init_lock);
    wait_for_thread_registration(nthreads); // the main thread blocks here until every worker has registered
    pthread_mutex_unlock(&init_lock);
}
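
// The comments above describe what setup_thread() does; here is a condensed sketch of it,
// modeled on memcached's thread.c of this era (libevent 1.x style API, hence event_set /
// event_base_set). Each worker gets its own event_base, an event that fires when the notify
// pipe becomes readable (callback: thread_libevent_process), and a private connection queue.
static void setup_thread(LIBEVENT_THREAD *me) {
    me->base = event_init();
    if (!me->base) {
        fprintf(stderr, "Can't allocate event base\n");
        exit(1);
    }

    /* Listen for notifications from other threads */
    event_set(&me->notify_event, me->notify_receive_fd,
              EV_READ | EV_PERSIST, thread_libevent_process, me);
    event_base_set(me->base, &me->notify_event);
    if (event_add(&me->notify_event, 0) == -1) {
        fprintf(stderr, "Can't monitor libevent notify pipe\n");
        exit(1);
    }

    me->new_conn_queue = malloc(sizeof(struct conn_queue));
    if (me->new_conn_queue == NULL) {
        perror("Failed to allocate memory for connection queue");
        exit(EXIT_FAILURE);
    }
    cq_init(me->new_conn_queue);
}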
Example #11
// Initialize the thread subsystem (called from the main thread)
void thread_init(int nthreads, struct event_base *main_base) {
    int         i;
    int         power;

    pthread_mutex_init(&cache_lock, NULL);
    pthread_mutex_init(&stats_lock, NULL);

    pthread_mutex_init(&init_lock, NULL);
    pthread_cond_init(&init_cond, NULL);

    pthread_mutex_init(&cqi_freelist_lock, NULL);
    cqi_freelist = NULL;

    /* Want a wide lock table, but don't waste memory */
    // Initialize the item locks.
    // The number of locks is scaled with the number of threads: locking exists because of
    // concurrency between threads, so the item lock count is tuned to the thread count.
    if (nthreads < 3) {
        power = 10;
    } else if (nthreads < 4) {
        power = 11;
    } else if (nthreads < 5) {
        power = 12;
    } else {
        /* 8192 buckets, and central locks don't scale much past 5 threads */
        power = 13;
    }

    item_lock_count = hashsize(power);
    item_lock_hashpower = power;

    item_locks = calloc(item_lock_count, sizeof(pthread_mutex_t));
    if (! item_locks) {
        perror("Can't allocate item locks");
        exit(1);
    }
    for (i = 0; i < item_lock_count; i++) {
        pthread_mutex_init(&item_locks[i], NULL);
    }
    pthread_key_create(&item_lock_type_key, NULL);
    pthread_mutex_init(&item_global_lock, NULL);

    // Allocate nthreads worker-thread descriptors
    threads = calloc(nthreads, sizeof(LIBEVENT_THREAD));
    if (! threads) {
        perror("Can't allocate thread descriptors");
        exit(1);
    }

    // Record the main (dispatcher) thread's event_base
    dispatcher_thread.base = main_base;
    // Record the main thread's thread id
    dispatcher_thread.thread_id = pthread_self();

    // Create, for each worker thread, a pipe used to communicate with the main thread
    for (i = 0; i < nthreads; i++) {
        int fds[2];
        // Create the pipe
        if (pipe(fds)) {
            perror("Can't create notify pipe");
            exit(1);
        }

        threads[i].notify_receive_fd = fds[0]; // read end, monitored by this worker
        threads[i].notify_send_fd = fds[1];    // write end, used by the dispatcher to notify this worker

        // Initialize this worker thread's libevent state
        setup_thread(&threads[i]);
        /* Reserve three fds for the libevent base, and two for the pipe */
        stats.reserved_fds += 5;
    }

    /* Create threads after we've done all the libevent setup. */
    for (i = 0; i < nthreads; i++) {
        // Create the worker thread; the function it runs is worker_libevent
        create_worker(worker_libevent, &threads[i]);
    }

    /* Wait for all the threads to set themselves up before returning. */
    pthread_mutex_lock(&init_lock);
    // Wait until every worker thread has finished starting up
    wait_for_thread_registration(nthreads);
    pthread_mutex_unlock(&init_lock);
}
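
// The "wait until every worker has started" step above works because each worker announces
// itself before entering its event loop. A sketch of the worker entry function, modeled on
// memcached's worker_libevent() (the item_lock_type lines only exist in versions that support
// per-thread lock switching):
static void *worker_libevent(void *arg) {
    LIBEVENT_THREAD *me = arg;

    /* Each worker starts out using the granular (segment-level) item locks. */
    me->item_lock_type = ITEM_LOCK_GRANULAR;
    pthread_setspecific(item_lock_type_key, &me->item_lock_type);

    register_thread_initialized();   /* bump init_count and wake the thread waiting in thread_init() */

    event_base_loop(me->base, 0);    /* run this worker's libevent loop */
    return NULL;
}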
Example #12
/*
 * Initializes the thread subsystem, creating various worker threads.
 *
 * nthreads  Number of worker event handler threads to spawn
 * main_base Event base for the main (dispatcher) thread
 */
void thread_init(int nthreads, struct event_base *main_base) {
    int         i;
    int         power;

    // Initialize the mutexes
    pthread_mutex_init(&cache_lock, NULL);
    pthread_mutex_init(&stats_lock, NULL);

    pthread_mutex_init(&init_lock, NULL);
    pthread_cond_init(&init_cond, NULL);

    pthread_mutex_init(&cqi_freelist_lock, NULL);
    cqi_freelist = NULL;

    /* Want a wide lock table, but don't waste memory */
    // Size the item lock table from the thread count
    if (nthreads < 3) {
        power = 10;
    } else if (nthreads < 4) {
        power = 11;
    } else if (nthreads < 5) {
        power = 12;
    } else {
        // 2^13
        /* 8192 buckets, and central locks don't scale much past 5 threads */
        power = 13;
    }

    // Pre-allocate hashsize(power) item locks; multiple hash-table buckets share each lock.
    // hashsize(n) = 2^n
    item_lock_count = hashsize(power);

    item_locks = calloc(item_lock_count, sizeof(pthread_mutex_t));
    if (! item_locks) {
        perror("Can't allocate item locks");
        exit(1);
    }
    // Initialize each lock
    for (i = 0; i < item_lock_count; i++) {
        pthread_mutex_init(&item_locks[i], NULL);
    }
    pthread_key_create(&item_lock_type_key, NULL);
    pthread_mutex_init(&item_global_lock, NULL);


    // LIBEVENT_THREAD is the per-thread structure used with libevent: an event_base plus the notify pipe fds
    threads = calloc(nthreads, sizeof(LIBEVENT_THREAD)); // allocate descriptors for nthreads worker threads
    if (! threads) {
        perror("Can't allocate thread descriptors");
        exit(1);
    }

    // main_base belongs to the dispatcher, i.e. the main thread
    dispatcher_thread.base = main_base;
    dispatcher_thread.thread_id = pthread_self();

    // Pipes used for libevent notifications
    for (i = 0; i < nthreads; i++) {
        int fds[2];
        if (pipe(fds)) {
            perror("Can't create notify pipe");
            exit(1);
        }

        // read end of the notify pipe
        threads[i].notify_receive_fd = fds[0];
        // write end of the notify pipe
        threads[i].notify_send_fd = fds[1];

        // Initialize this thread's data structures; this sets the notify event's callback to thread_libevent_process()
        setup_thread(&threads[i]);
        /* Reserve three fds for the libevent base, and two for the pipe */
        stats.reserved_fds += 5;
    }

    /* Create threads after we've done all the libevent setup. */
    // Create and start the threads; each one runs worker_libevent()
    for (i = 0; i < nthreads; i++) {
        // calls pthread_attr_init() and pthread_create()
        create_worker(worker_libevent, &threads[i]);
    }

    /* Wait for all the threads to set themselves up before returning. */
    pthread_mutex_lock(&init_lock);
    // wait_for_thread_registration() blocks on init_cond via pthread_cond_wait
    wait_for_thread_registration(nthreads);
    pthread_mutex_unlock(&init_lock);
}
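
// As the comments above note, the notify pipe's read event is wired to thread_libevent_process().
// A trimmed sketch of that callback, based on memcached's thread.c (only the 'c' "new connection"
// case is shown; versions with lock switching also handle 'l'/'g', and newer ones 'p' for pausing):
static void thread_libevent_process(int fd, short which, void *arg) {
    LIBEVENT_THREAD *me = arg;
    CQ_ITEM *item;
    char buf[1];

    if (read(fd, buf, 1) != 1)
        fprintf(stderr, "Can't read from libevent pipe\n");

    switch (buf[0]) {
    case 'c':
        /* The dispatcher queued a new connection for this worker: pop it and register the conn
         * on this thread's event_base. */
        item = cq_pop(me->new_conn_queue);
        if (item != NULL) {
            conn *c = conn_new(item->sfd, item->init_state, item->event_flags,
                               item->read_buffer_size, item->transport, me->base);
            if (c == NULL) {
                fprintf(stderr, "Can't listen for events on fd %d\n", item->sfd);
                close(item->sfd);
            } else {
                c->thread = me;
            }
            cqi_free(item);
        }
        break;
    }
}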