/**
 * @brief Set the kind of a pthread read/write lock attributes object.
 * @see pthread_rwlockattr_setkind_np()
 * @param attr a pointer to the read/write lock attributes object to be adjusted.
 * @param pref the kind of the read/write lock: PTHREAD_RWLOCK_PREFER_READER_NP,
 *     PTHREAD_RWLOCK_PREFER_WRITER_NP, or PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP.
 * @return 0 on success or an error number on failure.
 */
int rwlock_attr_setkind(pthread_rwlockattr_t *attr, int pref) {
#ifdef MAGMA_PEDANTIC
    int result = pthread_rwlockattr_setkind_np(attr, pref);

    if (result)
        log_pedantic("Could not set the read/write lock attribute preference. "
                     "{pthread_rwlockattr_setkind_np = %i}", result);

    return result;
#else
    return pthread_rwlockattr_setkind_np(attr, pref);
#endif
}
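/* A minimal caller-side sketch of the wrapper above (illustrative only; the
 * function name, lock argument, and error handling are assumptions, not part
 * of the original source): */
static int example_make_writer_preferring_lock(pthread_rwlock_t *lock) {
    pthread_rwlockattr_t attr;
    int result;

    if ((result = pthread_rwlockattr_init(&attr)))
        return result;
    if (!(result = rwlock_attr_setkind(&attr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP)))
        result = pthread_rwlock_init(lock, &attr);
    pthread_rwlockattr_destroy(&attr);
    return result;
}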
cRwLock::cRwLock(bool PreferWriter)
{
    pthread_rwlockattr_t attr;
    pthread_rwlockattr_init(&attr);
    pthread_rwlockattr_setkind_np(&attr, PreferWriter ? PTHREAD_RWLOCK_PREFER_WRITER_NP
                                                      : PTHREAD_RWLOCK_PREFER_READER_NP);
    pthread_rwlock_init(&rwlock, &attr);
}
static uint32_t diameter_create_lock(diameter_lock_handle *lock)
{
    pthread_rwlockattr_t rwlock_attr;
    pthread_rwlock_t *rwlock;
    int ret;

    ret = pthread_rwlockattr_init(&rwlock_attr);
    if (ret)
        return DIAMETER_CB_ERROR;

    ret = pthread_rwlockattr_setkind_np(&rwlock_attr,
                                        PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
    if (ret)
        return DIAMETER_CB_ERROR;

    rwlock = nkn_malloc_type(sizeof(pthread_rwlock_t), mod_diameter_t);
    if (rwlock == NULL)
        return DIAMETER_CB_ERROR;

    ret = pthread_rwlock_init(rwlock, &rwlock_attr);
    if (ret) {
        free(rwlock);
        return DIAMETER_CB_ERROR;
    }

    *lock = rwlock;
    return DIAMETER_CB_OK;
}
/* The 'offset' parameter is optional, but must be provided to be able to use
 * heap_remove(). If heap_remove() will not be used, then a negative value can
 * be provided. */
heap_t heap_new(int height, ssize_t offset, int cmp(const void *x, const void *y))
{
    heap_t heap;
    pthread_mutexattr_t attr;
    pthread_rwlockattr_t rwattr;

    if (height <= 0)
        height = 8;
    if (cmp == NULL)
        return NULL;
    if (NEW(heap) == NULL)
        return NULL;

    heap->avail = (height << 1) - 1;
    heap->curr = 0;
    if ((heap->h = CALLOC(1, heap->avail * sizeof *heap->h)) == NULL) {
        FREE(heap);
        return NULL;
    }
    heap->offset = offset;
    heap->cmp = cmp;

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
    pthread_mutex_init(&heap->lock, &attr);
    pthread_mutexattr_destroy(&attr);

    pthread_rwlockattr_init(&rwattr);
    pthread_rwlockattr_setkind_np(&rwattr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
    pthread_rwlock_init(&heap->rwlock, &rwattr);
    pthread_rwlockattr_destroy(&rwattr);

    return heap;
}
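/* Hedged usage sketch for heap_new() above: 'offset' is presumably the byte
 * offset of a position field inside the stored element, obtained with
 * offsetof(), so heap_remove() can locate an element's slot. The struct and
 * comparator below are hypothetical, not part of the original library. */
#include <stddef.h>
#include <sys/types.h>

struct job {
    int priority;
    ssize_t heap_pos; /* maintained by the heap through the 'offset' parameter */
};

static int job_cmp(const void *x, const void *y) {
    const struct job *a = x, *b = y;
    return (a->priority > b->priority) - (a->priority < b->priority);
}

/* heap_t h  = heap_new(16, offsetof(struct job, heap_pos), job_cmp);
 * heap_t h2 = heap_new(16, -1, job_cmp);  -- when heap_remove() is not needed */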
/**
 * @brief Initialize the package.
 */
void cih_pkginit(void)
{
    pthread_rwlockattr_t rwlock_attr;
    cih_partition_t *cp;
    uint32_t npart;
    uint32_t cache_sz = 32767; /* XXX */
    int ix;

    /* avoid writer starvation */
    pthread_rwlockattr_init(&rwlock_attr);
#ifdef GLIBC
    pthread_rwlockattr_setkind_np(&rwlock_attr,
                                  PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
#endif
    npart = nfs_param.cache_param.nparts;
    cih_fhcache.npart = npart;
    cih_fhcache.partition = gsh_calloc(npart, sizeof(cih_partition_t));
    for (ix = 0; ix < npart; ++ix) {
        cp = &cih_fhcache.partition[ix];
        cp->part_ix = ix;
        pthread_rwlock_init(&cp->lock, &rwlock_attr);
        avltree_init(&cp->t, cih_fh_cmpf, 0 /* must be 0 */);
        cih_fhcache.cache_sz = cache_sz;
        cp->cache = gsh_calloc(cache_sz, sizeof(struct avltree_node *));
    }
    initialized = true;
}
celix_status_t producer_create(char* name, producer_pt* producer)
{
    celix_status_t status = CELIX_SUCCESS;
    producer_pt lclProducer = calloc(1, sizeof(*lclProducer));

    if (lclProducer != NULL) {
        lclProducer->name = strdup(name);
        lclProducer->utilizationStatsName =
            calloc(1, strlen(name) + strlen(THROUGHPUT_NAME_POSTFIX) + 1);

        if (lclProducer->name != NULL && lclProducer->utilizationStatsName != NULL) {
            pthread_rwlockattr_t queueLockAttr;

            sprintf(lclProducer->utilizationStatsName, "%s%s",
                    lclProducer->name, (char*) THROUGHPUT_NAME_POSTFIX);

            pthread_rwlockattr_init(&queueLockAttr);
            pthread_rwlockattr_setkind_np(&queueLockAttr,
                                          PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
            pthread_rwlock_init(&lclProducer->queueLock, &queueLockAttr);

            lclProducer->queueServices =
                hashMap_create(utils_stringHash, NULL, utils_stringEquals, NULL);

            (*producer) = lclProducer;
        } else {
            status = CELIX_ENOMEM;
        }
    } else {
        status = CELIX_ENOMEM;
    }

    return status;
}
void
xpthread_rwlockattr_setkind_np (pthread_rwlockattr_t *attr, int pref)
{
  xpthread_check_return ("pthread_rwlockattr_setkind_np",
                         pthread_rwlockattr_setkind_np (attr, pref));
}
int __ast_rwlock_init(int tracking, const char *filename, int lineno,
                      const char *func, const char *rwlock_name, ast_rwlock_t *t)
{
    int res;
    pthread_rwlockattr_t attr;

#ifdef DEBUG_THREADS
#if defined(AST_MUTEX_INIT_W_CONSTRUCTORS) && defined(CAN_COMPARE_MUTEX_TO_INIT_VALUE)
    int canlog = strcmp(filename, "logger.c") & t->tracking;

    if (t->lock != ((pthread_rwlock_t) __AST_RWLOCK_INIT_VALUE)) {
        __ast_mutex_logger("%s line %d (%s): Warning: rwlock '%s' is already initialized.\n",
                           filename, lineno, func, rwlock_name);
        return 0;
    }
#endif /* AST_MUTEX_INIT_W_CONSTRUCTORS */

    if ((t->tracking = tracking)) {
        ast_reentrancy_init(&t->track);
    }
#endif /* DEBUG_THREADS */

    pthread_rwlockattr_init(&attr);
#ifdef HAVE_PTHREAD_RWLOCK_PREFER_WRITER_NP
    pthread_rwlockattr_setkind_np(&attr, PTHREAD_RWLOCK_PREFER_WRITER_NP);
#endif
    res = pthread_rwlock_init(&t->lock, &attr);
    pthread_rwlockattr_destroy(&attr);
    return res;
}
CRWLock::CRWLock()
{
    pthread_rwlockattr_t attr;
    pthread_rwlockattr_init(&attr);
    // Writer-preferred: once a writer requests the lock, all subsequent read
    // requests block, even if other threads already hold the read lock.
    pthread_rwlockattr_setkind_np(&attr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
    pthread_rwlock_init(&_rwlock, &attr);
    pthread_rwlockattr_destroy(&attr);
}
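/* Note on the kinds used in these snippets (glibc behavior, documented in
 * pthread_rwlockattr_setkind_np(3)): PTHREAD_RWLOCK_PREFER_WRITER_NP behaves
 * the same as PTHREAD_RWLOCK_PREFER_READER_NP, so only the NONRECURSIVE kind
 * actually prioritizes writers. The trade-off is that read locks must not be
 * taken recursively. A hypothetical interleaving of the hazard (illustrative,
 * not from the source above):
 *
 *   pthread_rwlock_rdlock(&rw);  // thread A holds a read lock
 *   ...                          // thread B queues pthread_rwlock_wrlock(&rw)
 *   pthread_rwlock_rdlock(&rw);  // thread A again: blocks behind B -> deadlock
 */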
ReadWriteLock::ReadWriteLock()
{
    pthread_rwlockattr_t myAttributes;
    pthread_rwlockattr_init(&myAttributes);
    pthread_rwlockattr_setkind_np(&myAttributes, PTHREAD_RWLOCK_PREFER_READER_NP);

    int myStatus = pthread_rwlock_init(&_myLock, &myAttributes);
    if (myStatus != 0) {
        throw LockInitFailed(strerror(myStatus), PLUS_FILE_LINE);
    }
}
rw_mutex::impl::impl()
    : valid(false)
{
    pthread_rwlockattr_t attr;
    pthread_rwlockattr_init(&attr);
#ifdef PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP
    // switch to writer-preferred lock on linux
    pthread_rwlockattr_setkind_np(&attr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
#endif
    int res = pthread_rwlock_init(&lk, &attr);
    if (res == 0)
        valid = true;
}
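// The feature-test macro probed above also names a static initializer, which
// can replace the attribute object entirely when the lock has static storage
// (a sketch under that assumption; 'g_lock' is a hypothetical name):
#ifdef PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP
static pthread_rwlock_t g_lock = PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP;
#endif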
void AA_init()
{
    pthread_rwlockattr_t aaa_lock_attr;
    int i;

    for (i = MIN_IPSPACE_ID; i <= MAX_IPSPACE_ID; i++) {
        aaa[i] = NULL;
    }

    /* set the 'prefer writer' flag on the rwlock */
    pthread_rwlockattr_init(&aaa_lock_attr);
    pthread_rwlockattr_setkind_np(&aaa_lock_attr, PTHREAD_RWLOCK_PREFER_WRITER_NP);
    pthread_rwlock_init(&aaa_lock, &aaa_lock_attr);

    AA_load();
}
static int
do_test (void)
{
  size_t cnt;

  for (cnt = 0; cnt < sizeof (kind) / sizeof (kind[0]); ++cnt)
    {
      pthread_rwlock_t r;
      pthread_rwlockattr_t a;

      if (pthread_rwlockattr_init (&a) != 0)
        FAIL_EXIT1 ("round %Zu: rwlockattr_t failed\n", cnt);

      if (pthread_rwlockattr_setkind_np (&a, kind[cnt]) != 0)
        FAIL_EXIT1 ("round %Zu: rwlockattr_setkind failed\n", cnt);

      if (pthread_rwlock_init (&r, &a) != 0)
        FAIL_EXIT1 ("round %Zu: rwlock_init failed\n", cnt);

      if (pthread_rwlockattr_destroy (&a) != 0)
        FAIL_EXIT1 ("round %Zu: rwlockattr_destroy failed\n", cnt);

      struct timespec ts;
      xclock_gettime (CLOCK_REALTIME, &ts);
      ++ts.tv_sec;

      /* Get a read lock.  */
      if (pthread_rwlock_timedrdlock (&r, &ts) != 0)
        FAIL_EXIT1 ("round %Zu: rwlock_timedrdlock failed\n", cnt);

      printf ("%zu: got timedrdlock\n", cnt);

      pthread_t th;
      if (pthread_create (&th, NULL, tf, &r) != 0)
        FAIL_EXIT1 ("round %Zu: create failed\n", cnt);

      void *status;
      if (pthread_join (th, &status) != 0)
        FAIL_EXIT1 ("round %Zu: join failed\n", cnt);
      if (status != NULL)
        FAIL_EXIT1 ("failure in round %Zu\n", cnt);

      if (pthread_rwlock_destroy (&r) != 0)
        FAIL_EXIT1 ("round %Zu: rwlock_destroy failed\n", cnt);
    }

  return 0;
}
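/* The timed variant used above takes an absolute CLOCK_REALTIME deadline,
 * which is why the test locks with "now + 1s". A minimal stand-alone sketch
 * of the call (variable names are illustrative):
 *
 *   struct timespec ts;
 *   clock_gettime (CLOCK_REALTIME, &ts);
 *   ts.tv_sec += 1;
 *   int e = pthread_rwlock_timedrdlock (&r, &ts);
 *   // 0 on success; ETIMEDOUT if the lock is still write-held at the deadline
 */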
void gdnsd_prcu_setup_lock(void) {
    int pthread_err;
    pthread_rwlockattr_t lockatt;

    if((pthread_err = pthread_rwlockattr_init(&lockatt)))
        log_fatal("pthread_rwlockattr_init() failed: %s",
                  dmn_logf_strerror(pthread_err));

    // Non-portable way to boost writer priority.  Our writelocks are held
    // very briefly and very rarely, whereas the readlocks could be very
    // spammy, and we don't want to block the write operation forever.
    // This works on Linux+glibc.
#ifdef PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP
    if((pthread_err = pthread_rwlockattr_setkind_np(&lockatt,
            PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP)))
        log_fatal("pthread_rwlockattr_setkind_np(PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP) failed: %s",
                  dmn_logf_strerror(pthread_err));
#endif

    if((pthread_err = pthread_rwlock_init(&gdnsd_prcu_rwlock, &lockatt)))
        log_fatal("pthread_rwlock_init() failed: %s",
                  dmn_logf_strerror(pthread_err));

    if((pthread_err = pthread_rwlockattr_destroy(&lockatt)))
        log_fatal("pthread_rwlockattr_destroy() failed: %s",
                  dmn_logf_strerror(pthread_err));
}
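/* Hedged sketch of the usage split the comment above describes (function
 * names are hypothetical; gdnsd's real read and write paths live elsewhere): */
static void example_reader(void) {
    pthread_rwlock_rdlock(&gdnsd_prcu_rwlock);   /* frequent, brief */
    /* ... read the shared state ... */
    pthread_rwlock_unlock(&gdnsd_prcu_rwlock);
}

static void example_writer(void) {
    pthread_rwlock_wrlock(&gdnsd_prcu_rwlock);   /* rare; new readers queue behind it */
    /* ... publish the new state ... */
    pthread_rwlock_unlock(&gdnsd_prcu_rwlock);
}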
springfield_t *
springfield_create(char *path, uint32_t num_buckets)
{
    assert(sizeof(void *) == 8); // Springfield needs a 64-bit system

    springfield_t *r = calloc(1, sizeof(springfield_t));
    r->num_buckets = num_buckets;
    r->path = malloc(strlen(path) + 1);
    strcpy(r->path, path);
    r->mmap_alloc = 0;

    pthread_rwlockattr_t attr;
    pthread_rwlockattr_init(&attr);
    int res = pthread_rwlockattr_setkind_np(&attr,
            PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
    assert(!res);
    pthread_rwlock_init(&r->main_lock, &attr);
    pthread_mutex_init(&r->iter_lock, NULL);

    springfield_load(r);

    return r;
}
/**
 * Allocate a new Video mixer
 *
 * @param mixp Pointer to allocated video mixer
 *
 * @return 0 for success, otherwise error code
 */
int vidmix_alloc(struct vidmix **mixp)
{
    pthread_rwlockattr_t attr;
    struct vidmix *mix;
    int err;

    if (!mixp)
        return EINVAL;

    mix = mem_zalloc(sizeof(*mix), destructor);
    if (!mix)
        return ENOMEM;

    err = pthread_rwlockattr_init(&attr);
    if (err) {
        mem_deref(mix);
        return err;
    }

#if defined(LINUX) && defined(__GLIBC__)
    err = pthread_rwlockattr_setkind_np(&attr,
            PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
    if (err)
        goto out;
#endif

    err = pthread_rwlock_init(&mix->rwlock, &attr);
    if (err)
        goto out;

    mix->initialized = true;

 out:
    (void)pthread_rwlockattr_destroy(&attr);
    if (err)
        mem_deref(mix);
    else
        *mixp = mix;

    return err;
}
static void init_read_write_lock(pthread_rwlock_t* lock)
{
    pthread_rwlockattr_t* pattr;
#if defined(YOG_HAVE_PTHREAD_RWLOCKATTR_INIT)
    pthread_rwlockattr_t attr;
    pthread_rwlockattr_init(&attr);
# if defined(YOG_HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP)
    pthread_rwlockattr_setkind_np(&attr, PTHREAD_RWLOCK_PREFER_WRITER_NP);
# endif
    pattr = &attr;
#else
    pattr = NULL;
#endif

    int err;
    if ((err = pthread_rwlock_init(lock, pattr)) != 0) {
        YOG_BUG(NULL, "pthread_rwlock_init failed: %s", strerror(err));
    }

#if defined(YOG_HAVE_PTHREAD_RWLOCKATTR_INIT) && defined(YOG_HAVE_PTHREAD_RWLOCKATTR_DESTROY)
    pthread_rwlockattr_destroy(&attr);
#endif
}
TEST(pthread, pthread_rwlockattr_smoke) {
  pthread_rwlockattr_t attr;
  ASSERT_EQ(0, pthread_rwlockattr_init(&attr));

  int pshared_value_array[] = {PTHREAD_PROCESS_PRIVATE, PTHREAD_PROCESS_SHARED};
  for (size_t i = 0; i < sizeof(pshared_value_array) / sizeof(pshared_value_array[0]); ++i) {
    ASSERT_EQ(0, pthread_rwlockattr_setpshared(&attr, pshared_value_array[i]));
    int pshared;
    ASSERT_EQ(0, pthread_rwlockattr_getpshared(&attr, &pshared));
    ASSERT_EQ(pshared_value_array[i], pshared);
  }

  int kind_array[] = {PTHREAD_RWLOCK_PREFER_READER_NP,
                      PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP};
  for (size_t i = 0; i < sizeof(kind_array) / sizeof(kind_array[0]); ++i) {
    ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_array[i]));
    int kind;
    ASSERT_EQ(0, pthread_rwlockattr_getkind_np(&attr, &kind));
    ASSERT_EQ(kind_array[i], kind);
  }

  ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr));
}
gweakref_table_t *gweakref_init(uint32_t npart, uint32_t cache_sz)
{
    int ix = 0;
    pthread_rwlockattr_t rwlock_attr;
    gweakref_partition_t *wp = NULL;
    gweakref_table_t *wt = NULL;

    wt = gsh_calloc(1, sizeof(gweakref_table_t));
    if (!wt)
        goto out;

    /* prior versions of Linux tirpc are subject to default prefer-reader
     * behavior (so have potential for writer starvation) */
    pthread_rwlockattr_init(&rwlock_attr);
#ifdef GLIBC
    pthread_rwlockattr_setkind_np(&rwlock_attr,
                                  PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
#endif

    /* npart should be a small integer */
    wt->npart = npart;
    wt->partition = gsh_calloc(npart, sizeof(gweakref_partition_t));
    for (ix = 0; ix < npart; ++ix) {
        wp = &wt->partition[ix];
        pthread_rwlock_init(&wp->lock, &rwlock_attr);
        avltree_init(&wp->t, wk_cmpf, 0 /* must be 0 */);
        if (cache_sz > 0) {
            wt->cache_sz = cache_sz;
            wp->cache = gsh_calloc(cache_sz, sizeof(struct avltree_node *));
        }
        wp->genctr = 0;
    }

out:
    return (wt);
}
static int
do_test (void)
{
  size_t cnt;

  for (cnt = 0; cnt < sizeof (kind) / sizeof (kind[0]); ++cnt)
    {
      pthread_rwlock_t r;
      pthread_rwlockattr_t a;

      if (pthread_rwlockattr_init (&a) != 0)
        {
          printf ("round %Zu: rwlockattr_t failed\n", cnt);
          exit (1);
        }

      if (pthread_rwlockattr_setkind_np (&a, kind[cnt]) != 0)
        {
          printf ("round %Zu: rwlockattr_setkind failed\n", cnt);
          exit (1);
        }

      if (pthread_rwlock_init (&r, &a) != 0)
        {
          printf ("round %Zu: rwlock_init failed\n", cnt);
          exit (1);
        }

      if (pthread_rwlockattr_destroy (&a) != 0)
        {
          printf ("round %Zu: rwlockattr_destroy failed\n", cnt);
          exit (1);
        }

      struct timeval tv;
      (void) gettimeofday (&tv, NULL);

      struct timespec ts;
      TIMEVAL_TO_TIMESPEC (&tv, &ts);
      ++ts.tv_sec;

      /* Get a read lock.  */
      if (pthread_rwlock_timedrdlock (&r, &ts) != 0)
        {
          printf ("round %Zu: rwlock_timedrdlock failed\n", cnt);
          exit (1);
        }

      printf ("%zu: got timedrdlock\n", cnt);

      pthread_t th;
      if (pthread_create (&th, NULL, tf, &r) != 0)
        {
          printf ("round %Zu: create failed\n", cnt);
          exit (1);
        }

      void *status;
      if (pthread_join (th, &status) != 0)
        {
          printf ("round %Zu: join failed\n", cnt);
          exit (1);
        }
      if (status != NULL)
        {
          printf ("failure in round %Zu\n", cnt);
          exit (1);
        }

      if (pthread_rwlock_destroy (&r) != 0)
        {
          printf ("round %Zu: rwlock_destroy failed\n", cnt);
          exit (1);
        }
    }

  return 0;
}
static int
do_test (void)
{
  pthread_rwlock_t r;
  pthread_rwlockattr_t at;
  int e;

  if (pthread_rwlockattr_init (&at) != 0)
    {
      puts ("rwlockattr_init failed");
      return 1;
    }
  puts ("rwlockattr_init succeeded");

#ifndef TYPE
# define TYPE PTHREAD_RWLOCK_PREFER_READER_NP
#endif

  if (pthread_rwlockattr_setkind_np (&at, TYPE) != 0)
    {
      puts ("rwlockattr_setkind failed");
      return 1;
    }
  puts ("rwlockattr_setkind succeeded");

  if (pthread_rwlock_init (&r, &at) != 0)
    {
      puts ("rwlock_init failed");
      return 1;
    }
  puts ("rwlock_init succeeded");

  if (pthread_rwlockattr_destroy (&at) != 0)
    {
      puts ("rwlockattr_destroy failed");
      return 1;
    }
  puts ("rwlockattr_destroy succeeded");

  if (pthread_rwlock_wrlock (&r) != 0)
    {
      puts ("1st rwlock_wrlock failed");
      return 1;
    }
  puts ("1st rwlock_wrlock succeeded");

  e = pthread_rwlock_tryrdlock (&r);
  if (e == 0)
    {
      puts ("rwlock_tryrdlock on rwlock with writer succeeded");
      return 1;
    }
  if (e != EBUSY)
    {
      puts ("rwlock_tryrdlock on rwlock with writer return value != EBUSY");
      return 1;
    }
  puts ("rwlock_tryrdlock on rwlock with writer failed with EBUSY");

  e = pthread_rwlock_trywrlock (&r);
  if (e == 0)
    {
      puts ("rwlock_trywrlock on rwlock with writer succeeded");
      return 1;
    }
  if (e != EBUSY)
    {
      puts ("rwlock_trywrlock on rwlock with writer return value != EBUSY");
      return 1;
    }
  puts ("rwlock_trywrlock on rwlock with writer failed with EBUSY");

  if (pthread_rwlock_unlock (&r) != 0)
    {
      puts ("1st rwlock_unlock failed");
      return 1;
    }
  puts ("1st rwlock_unlock succeeded");

  if (pthread_rwlock_tryrdlock (&r) != 0)
    {
      puts ("rwlock_tryrdlock on unlocked rwlock failed");
      return 1;
    }
  puts ("rwlock_tryrdlock on unlocked rwlock succeeded");

  e = pthread_rwlock_trywrlock (&r);
  if (e == 0)
    {
      puts ("rwlock_trywrlock on rwlock with reader succeeded");
      return 1;
    }
  if (e != EBUSY)
    {
      puts ("rwlock_trywrlock on rwlock with reader return value != EBUSY");
      return 1;
    }
  puts ("rwlock_trywrlock on rwlock with reader failed with EBUSY");

  if (pthread_rwlock_unlock (&r) != 0)
    {
      puts ("2nd rwlock_unlock failed");
      return 1;
    }
  puts ("2nd rwlock_unlock succeeded");

  if (pthread_rwlock_trywrlock (&r) != 0)
    {
      puts ("rwlock_trywrlock on unlocked rwlock failed");
      return 1;
    }
  puts ("rwlock_trywrlock on unlocked rwlock succeeded");

  e = pthread_rwlock_tryrdlock (&r);
  if (e == 0)
    {
      puts ("rwlock_tryrdlock on rwlock with writer succeeded");
      return 1;
    }
  if (e != EBUSY)
    {
      puts ("rwlock_tryrdlock on rwlock with writer return value != EBUSY");
      return 1;
    }
  puts ("rwlock_tryrdlock on rwlock with writer failed with EBUSY");

  if (pthread_rwlock_unlock (&r) != 0)
    {
      puts ("3rd rwlock_unlock failed");
      return 1;
    }
  puts ("3rd rwlock_unlock succeeded");

  if (pthread_rwlock_destroy (&r) != 0)
    {
      puts ("rwlock_destroy failed");
      return 1;
    }
  puts ("rwlock_destroy succeeded");

  return 0;
}
int init_map_private_t(map_private_t *mpriv, const char *cache_name_host,
                       int *online)
{
    int retval = 0;
    int rv;
    int n;
    pthread_rwlockattr_t rwlock_attr;

    /* Setup hash table for Heartbeat IP:Port to node[] mapping */
    mpriv->ht_hostnames = ht_base_nocase_hash;
    mpriv->ht_hostnames.ht = mpriv->hash_hostnames;
    mpriv->ht_hostnames.size =
        sizeof(mpriv->hash_hostnames) / sizeof(hash_entry_t);

    /* Setup hash table for HTTP IP:Port to node[] mapping */
    mpriv->ht_http_hostnames = ht_base_nocase_hash;
    mpriv->ht_http_hostnames.ht = mpriv->hash_http_hostnames;
    mpriv->ht_http_hostnames.size =
        sizeof(mpriv->hash_http_hostnames) / sizeof(hash_entry_t);

    for (n = 0; mpriv->node[n]; n++) {
        rv = (*mpriv->ht_hostnames.add_func)(&mpriv->ht_hostnames,
                mpriv->node[n]->node_host_port,
                strlen(mpriv->node[n]->node_host_port), n);
        if (rv) {
            DBG_LOG(MSG, MOD_CLUSTER,
                    "hash add_func failed, rv=%d, key=%s index=%d",
                    rv, mpriv->node[n]->node_host_port, n);
            retval = 1;
            goto err_exit;
        }

        rv = (*mpriv->ht_http_hostnames.add_func)(&mpriv->ht_http_hostnames,
                mpriv->node[n]->http_node_host_port,
                strlen(mpriv->node[n]->http_node_host_port), n);
        if (rv) {
            DBG_LOG(MSG, MOD_CLUSTER,
                    "hash add_func failed, rv=%d, key=%s index=%d",
                    rv, mpriv->node[n]->http_node_host_port, n);
            retval = 11;
            goto err_exit;
        }

        if (mpriv->node[n]->node_updates) {
            if (mpriv->node[n]->node_online) {
                mpriv->online_nodes++;
            }
        } else {
            mpriv->init_in_progress++;
        }
    }
    mpriv->defined_nodes = n;

    if (!mpriv->init_in_progress) {
        *online = 1;
    } else {
        *online = 0;
    }

    /* Generate the host portion of the cache name */
    mpriv->cache_name_host = nkn_strdup_type(cache_name_host, mod_cl_cname);
    mpriv->cache_name_host_strlen = strlen(mpriv->cache_name_host);

    /* Initialize rwlock and bias for writers. */
    rv = pthread_rwlockattr_init(&rwlock_attr);
    if (rv) {
        DBG_LOG(MSG, MOD_CLUSTER,
                "pthread_rwlockattr_init() failed, rv=%d", rv);
        retval = 2;
        goto err_exit;
    }

    rv = pthread_rwlockattr_setkind_np(&rwlock_attr,
                                       PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
    if (rv) {
        DBG_LOG(MSG, MOD_CLUSTER,
                "pthread_rwlockattr_setkind_np() failed, rv=%d", rv);
        retval = 3;
        goto err_exit;
    }

    rv = pthread_rwlock_init(&mpriv->rwlock, &rwlock_attr);
    pthread_rwlockattr_destroy(&rwlock_attr);
    if (rv) {
        DBG_LOG(MSG, MOD_CLUSTER,
                "pthread_rwlock_init() failed, rv=%d", rv);
        retval = 4;
        goto err_exit;
    }
    mpriv->flags = MP_FL_RWLOCK_INIT;

    return 0;

err_exit:
    deinit_map_private_t(mpriv);
    return retval;
}
pmix_status_t pmix_gds_ds12_lock_init(pmix_common_dstor_lock_ctx_t *ctx,
                                      const char *base_path, const char *name,
                                      uint32_t local_size, uid_t uid, bool setuid)
{
    size_t size = pmix_common_dstor_getpagesize();
    pmix_status_t rc = PMIX_SUCCESS;
    pthread_rwlockattr_t attr;
    ds12_lock_pthread_ctx_t *lock_ctx = (ds12_lock_pthread_ctx_t*)ctx;

    if (*ctx != NULL) {
        return PMIX_SUCCESS;
    }
    lock_ctx = (ds12_lock_pthread_ctx_t*)malloc(sizeof(ds12_lock_pthread_ctx_t));
    if (NULL == lock_ctx) {
        rc = PMIX_ERR_INIT;
        PMIX_ERROR_LOG(rc);
        goto error;
    }
    memset(lock_ctx, 0, sizeof(ds12_lock_pthread_ctx_t));
    *ctx = (pmix_common_dstor_lock_ctx_t*)lock_ctx;

    lock_ctx->segment = (pmix_pshmem_seg_t *)malloc(sizeof(pmix_pshmem_seg_t));
    if (NULL == lock_ctx->segment) {
        rc = PMIX_ERR_OUT_OF_RESOURCE;
        PMIX_ERROR_LOG(rc);
        goto error;
    }

    /* create a lock file to prevent clients from reading while the server is
     * writing to the shared memory. This situation arises quite often,
     * especially in the case of direct modex, when clients may ask for data
     * simultaneously. */
    if (0 > asprintf(&lock_ctx->lockfile, "%s/dstore_sm.lock", base_path)) {
        rc = PMIX_ERR_OUT_OF_RESOURCE;
        PMIX_ERROR_LOG(rc);
        goto error;
    }
    PMIX_OUTPUT_VERBOSE((10, pmix_gds_base_framework.framework_output,
                         "%s:%d:%s _lockfile_name: %s", __FILE__, __LINE__,
                         __func__, lock_ctx->lockfile));

    if (PMIX_PROC_IS_SERVER(pmix_globals.mypeer)) {
        if (PMIX_SUCCESS != (rc = pmix_pshmem.segment_create(lock_ctx->segment,
                                      lock_ctx->lockfile, size))) {
            PMIX_ERROR_LOG(rc);
            goto error;
        }
        memset(lock_ctx->segment->seg_base_addr, 0, size);

        if (0 != setuid) {
            if (0 > chown(lock_ctx->lockfile, (uid_t) uid, (gid_t) -1)) {
                rc = PMIX_ERROR;
                PMIX_ERROR_LOG(rc);
                goto error;
            }
            /* set the mode as required */
            if (0 > chmod(lock_ctx->lockfile,
                          S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP)) {
                rc = PMIX_ERROR;
                PMIX_ERROR_LOG(rc);
                goto error;
            }
        }
        lock_ctx->rwlock = (pthread_rwlock_t *)lock_ctx->segment->seg_base_addr;

        if (0 != pthread_rwlockattr_init(&attr)) {
            rc = PMIX_ERROR;
            PMIX_ERROR_LOG(rc);
            goto error;
        }
        if (0 != pthread_rwlockattr_setpshared(&attr, PTHREAD_PROCESS_SHARED)) {
            pthread_rwlockattr_destroy(&attr);
            rc = PMIX_ERR_INIT;
            PMIX_ERROR_LOG(rc);
            goto error;
        }
#ifdef HAVE_PTHREAD_SETKIND
        if (0 != pthread_rwlockattr_setkind_np(&attr,
                     PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP)) {
            pthread_rwlockattr_destroy(&attr);
            PMIX_ERROR_LOG(PMIX_ERR_INIT);
            goto error;
        }
#endif
        if (0 != pthread_rwlock_init(lock_ctx->rwlock, &attr)) {
            pthread_rwlockattr_destroy(&attr);
            PMIX_ERROR_LOG(PMIX_ERR_INIT);
            goto error;
        }
        if (0 != pthread_rwlockattr_destroy(&attr)) {
            PMIX_ERROR_LOG(PMIX_ERR_INIT);
            goto error;
        }
    } else {
        lock_ctx->segment->seg_size = size;
        snprintf(lock_ctx->segment->seg_name, PMIX_PATH_MAX, "%s",
                 lock_ctx->lockfile);
        if (PMIX_SUCCESS != (rc = pmix_pshmem.segment_attach(lock_ctx->segment,
                                      PMIX_PSHMEM_RW))) {
            PMIX_ERROR_LOG(rc);
            goto error;
        }
        lock_ctx->rwlock = (pthread_rwlock_t *)lock_ctx->segment->seg_base_addr;
    }

    return PMIX_SUCCESS;

error:
    if (NULL != lock_ctx) {
        if (lock_ctx->segment) {
            /* detach & unlink from current desc */
            if (lock_ctx->segment->seg_cpid == getpid()) {
                pmix_pshmem.segment_unlink(lock_ctx->segment);
            }
            pmix_pshmem.segment_detach(lock_ctx->segment);
            lock_ctx->rwlock = NULL;
        }
        if (NULL != lock_ctx->lockfile) {
            free(lock_ctx->lockfile);
        }
        free(lock_ctx);
        *ctx = (pmix_common_dstor_lock_ctx_t*)NULL;
    }
    return rc;
}
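/* Minimal self-contained sketch of the core pattern above: a process-shared
 * rwlock placed in shared memory. Here an anonymous shared mapping stands in
 * for the PMIx segment (it is visible to fork() children); the function name
 * is illustrative, not PMIx API. */
#include <pthread.h>
#include <sys/mman.h>

static pthread_rwlock_t *example_shared_rwlock(void) {
    pthread_rwlock_t *rw = mmap(NULL, sizeof(*rw), PROT_READ | PROT_WRITE,
                                MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    if (rw == MAP_FAILED)
        return NULL;

    pthread_rwlockattr_t attr;
    if (pthread_rwlockattr_init(&attr) != 0) {
        munmap(rw, sizeof(*rw));
        return NULL;
    }
    /* PTHREAD_PROCESS_SHARED is what allows use across process boundaries */
    pthread_rwlockattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
    pthread_rwlock_init(rw, &attr);
    pthread_rwlockattr_destroy(&attr);
    return rw;
}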
struct hash_table *hashtable_init(struct hash_param *hparam)
{
    /* The hash table being constructed */
    struct hash_table *ht = NULL;
    /* The index for initializing each partition */
    uint32_t index = 0;
    /* Read-write lock attributes, to prevent write starvation under GLIBC */
    pthread_rwlockattr_t rwlockattr;
    /* Hash partition */
    struct hash_partition *partition = NULL;
    /* The number of fully initialized partitions */
    uint32_t completed = 0;

    if (pthread_rwlockattr_init(&rwlockattr) != 0)
        return NULL;

    /* At some point factor this out into the OS directory. It is necessary
       to prevent writer starvation under GLIBC. */
#ifdef GLIBC
    if ((pthread_rwlockattr_setkind_np(&rwlockattr,
             PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP)) != 0) {
        LogCrit(COMPONENT_HASHTABLE,
                "Unable to set writer-preference on lock attribute.");
        pthread_rwlockattr_destroy(&rwlockattr);
        return NULL;
    }
#endif /* GLIBC */

    ht = gsh_calloc(1, sizeof(struct hash_table) +
                    (sizeof(struct hash_partition) * hparam->index_size));

    /* Fixup entry size */
    if (hparam->flags & HT_FLAG_CACHE) {
        if (!hparam->cache_entry_count)
            /* works fine with a good hash algo */
            hparam->cache_entry_count = 32767;
    }

    /* We need to save a copy of the parameters in the table. */
    ht->parameter = *hparam;
    for (index = 0; index < hparam->index_size; ++index) {
        partition = (&ht->partitions[index]);
        RBT_HEAD_INIT(&(partition->rbt));

        if (pthread_rwlock_init(&partition->lock, &rwlockattr) != 0) {
            LogCrit(COMPONENT_HASHTABLE,
                    "Unable to initialize lock in hash table.");
            goto deconstruct;
        }

        /* Allocate a cache if requested */
        if (hparam->flags & HT_FLAG_CACHE)
            partition->cache = gsh_calloc(1, cache_page_size(ht));

        completed++;
    }

    ht->node_pool = pool_basic_init(NULL, sizeof(rbt_node_t));
    ht->data_pool = pool_basic_init(NULL, sizeof(struct hash_data));

    pthread_rwlockattr_destroy(&rwlockattr);
    return ht;

deconstruct:
    while (completed != 0) {
        if (hparam->flags & HT_FLAG_CACHE)
            gsh_free(ht->partitions[completed - 1].cache);

        PTHREAD_RWLOCK_destroy(&(ht->partitions[completed - 1].lock));
        completed--;
    }
    if (ht->node_pool)
        pool_destroy(ht->node_pool);
    if (ht->data_pool)
        pool_destroy(ht->data_pool);
    pthread_rwlockattr_destroy(&rwlockattr);
    gsh_free(ht);
    return NULL;
}
void CUserQueryMain::UserQueryMainCore()
{
    BdxPrintConfig();

    // init read/write lock
    pthread_rwlockattr_init(&p_rwlock_attr);
    pthread_rwlockattr_setkind_np(&p_rwlock_attr, PTHREAD_RWLOCK_PREFER_WRITER_NP);
    if (pthread_rwlock_init(&p_rwlock, &p_rwlock_attr)) {
        throw std::runtime_error("pthread_rwlock_init error.");
    }
    pthread_mutex_init(&mutex, NULL);

    for (u_int i = 0; i < m_uiThreadsNum; ++i) {
        CUserQueryWorkThreads* pclWorkThread =
            new CUserQueryWorkThreads(m_stServerInfo.m_stRedisServer,
                                      m_stServerInfo.m_stGoodsServer);
        m_cThreads.SetRoutine(StartRoutine<CUserQueryWorkThreads>);
        m_cThreads.CreateThead(pclWorkThread);
    }
    LOG(DEBUG, "start work threads ok.");
    printf("Line:%d,start work threads ok.\n", __LINE__);

#if 1
    for (std::map<std::string, QUERYAPIINFO_S>::iterator itr = g_vecUrlAPIS.begin();
         itr != g_vecUrlAPIS.end(); itr++) {
        printf("Line:%d,%s \n", __LINE__, itr->first.c_str());
    }
#endif

    {
        CUserQueryCount* pCount = new CUserQueryCount(m_stStatisticsPrm, m_stMysqlInfo);
        m_cThreads.SetRoutine(StartRoutine<CUserQueryCount>);
        m_cThreads.CreateThead(pCount);
    }

    {
        CUserQueryHiveLog* pHiveLog = new CUserQueryHiveLog(m_stHiveLogPrm, m_stMysqlInfo);
        m_cThreads.SetRoutine(StartRoutine<CUserQueryHiveLog>);
        m_cThreads.CreateThead(pHiveLog);
    }

    {
        CUserQueryServer* pServer =
            new CUserQueryServer(m_stServerInfo.m_stLocalServer, m_uiThreadsNum);
        m_cThreads.SetRoutine(StartRoutine<CUserQueryServer>);
        m_cThreads.CreateThead(pServer);
    }
    //LOG(DEBUG, "start Adapter server Ok [port : %d]", m_stServerInfo.m_stLocalServer.m_uiPort);

    {
        //CUserQueryUpdate* pMonitorThread = new CUserQueryUpdate(m_stServerInfo.m_stTokenServer, m_stMysqlInfo);
        CUserQueryUpdate* pMonitorThread =
            new CUserQueryUpdate(m_stServerInfo.m_stRedisServer, m_stMysqlInfo);
        m_cThreads.SetRoutine(StartRoutine<CUserQueryUpdate>);
        m_cThreads.CreateThead(pMonitorThread);
    }
    LOG(DEBUG, "start Adapter Monitor Ok [Config File : %s]", strConfigFileName.c_str());

    m_cThreads.ThreadJionALL();
}