/* Generic "fast" lock backend: the shared lock area is zeroed and driven
   directly (no OS primitive), so a holder dying mid-critical-section leaves
   the lock stuck forever -> flag it as deadlock-prone. */
struct uwsgi_lock_item *uwsgi_lock_fast_init(char *id) {

	struct uwsgi_lock_item *item = uwsgi_register_lock(id, 0);
	memset(item->lock_ptr, 0, UWSGI_LOCK_SIZE);
	item->can_deadlock = 1;
	return item;
}
struct uwsgi_lock_item *uwsgi_rwlock_fast_init(char *id) { #ifdef OBSOLETE_LINUX_KERNEL return uwsgi_lock_fast_init(id); #else pthread_rwlockattr_t attr; struct uwsgi_lock_item *uli = uwsgi_register_lock(id, 1); if (pthread_rwlockattr_init(&attr)) { uwsgi_log("unable to allocate rwlock structure\n"); exit(1); } if (pthread_rwlockattr_setpshared(&attr, PTHREAD_PROCESS_SHARED)) { uwsgi_log("unable to share rwlock\n"); exit(1); } if (pthread_rwlock_init((pthread_rwlock_t *) uli->lock_ptr, &attr)) { uwsgi_log("unable to initialize rwlock\n"); exit(1); } pthread_rwlockattr_destroy(&attr); uli->can_deadlock = 1; return uli; #endif }
/* Windows lock backend: a named-less Win32 mutex, inheritable by child
   processes (bInheritHandle = 1) so workers spawned from the master can
   use it. */
struct uwsgi_lock_item *uwsgi_lock_fast_init(char *id) {

	struct uwsgi_lock_item *uli = uwsgi_register_lock(id, 0);
	struct _SECURITY_ATTRIBUTES sa;
	memset(&sa, 0, sizeof(struct _SECURITY_ATTRIBUTES));
	sa.bInheritHandle = 1;
	uli->lock_ptr = CreateMutex(&sa, FALSE, NULL);
	// CreateMutex() returns NULL on failure; every other platform backend
	// aborts when lock initialization fails, so do the same here instead
	// of returning an unusable lock item
	if (uli->lock_ptr == NULL) {
		uwsgi_log("unable to initialize mutex\n");
		exit(1);
	}
	return uli;
}
// REMEMBER lock must contains space for both pthread_mutex_t and pthread_mutexattr_t !!! struct uwsgi_lock_item *uwsgi_lock_fast_init(char *id) { pthread_mutexattr_t attr; struct uwsgi_lock_item *uli = uwsgi_register_lock(id, 0); #ifdef EOWNERDEAD retry: #endif if (pthread_mutexattr_init(&attr)) { uwsgi_log("unable to allocate mutexattr structure\n"); exit(1); } if (pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED)) { uwsgi_log("unable to share mutex\n"); exit(1); } #ifdef EOWNERDEAD if (uwsgi_pthread_robust_mutexes_enabled) { if (pthread_mutexattr_setrobust_np(&attr, PTHREAD_MUTEX_ROBUST_NP)) { uwsgi_log("unable to make the mutex 'robust'\n"); exit(1); } } #endif if (pthread_mutex_init((pthread_mutex_t *) uli->lock_ptr, &attr)) { #ifdef EOWNERDEAD if (uwsgi_pthread_robust_mutexes_enabled) { uwsgi_log("!!! it looks like your kernel does not support pthread robust mutexes !!!\n"); uwsgi_log("!!! falling back to standard pthread mutexes !!!\n"); uwsgi_pthread_robust_mutexes_enabled = 0; pthread_mutexattr_destroy(&attr); goto retry; } #endif uwsgi_log("unable to initialize mutex\n"); exit(1); } pthread_mutexattr_destroy(&attr); #ifdef EOWNERDEAD if (!uwsgi_pthread_robust_mutexes_enabled) { uli->can_deadlock = 1; } #else uli->can_deadlock = 1; #endif return uli; }
struct uwsgi_lock_item *uwsgi_lock_ipcsem_init(char *id) { // used by ftok static int counter = 1; union semun { int val; struct semid_ds *buf; ushort *array; } semu; int semid; key_t myKey; struct uwsgi_lock_item *uli = uwsgi_register_lock(id, 0); if (uwsgi.ftok) { myKey = ftok(uwsgi.ftok, counter); if (myKey < 0) { uwsgi_error("ftok()"); exit(1); } counter++; semid = semget(myKey, 1, IPC_CREAT | 0666); } else { semid = semget(IPC_PRIVATE, 1, IPC_CREAT | IPC_EXCL | 0666); } if (semid < 0) { uwsgi_error("semget()"); exit(1); } // do this now, to allows triggering of atexit hook in case of problems memcpy(uli->lock_ptr, &semid, sizeof(int)); semu.val = 1; if (semctl(semid, 0, SETVAL, semu)) { uwsgi_error("semctl()"); exit(1); } return uli; }
/* POSIX unnamed-semaphore lock backend: a process-shared (pshared=1)
   binary semaphore (initial value 1) stored in the lock area. */
struct uwsgi_lock_item *uwsgi_lock_fast_init(char *id) {

	struct uwsgi_lock_item *uli = uwsgi_register_lock(id, 0);
	// sem_init() returns -1 and sets errno on failure (e.g. process-shared
	// semaphores unsupported); all sibling backends abort on init failure,
	// so do the same instead of handing out a broken lock
	if (sem_init((sem_t *) uli->lock_ptr, 1, 1)) {
		uwsgi_error("sem_init()");
		exit(1);
	}
	// a plain semaphore stays held forever if its owner dies
	uli->can_deadlock = 1;
	return uli;
}
/* FreeBSD umtx lock backend: initialize an in-kernel userland mutex
   placed in the shared lock area. */
struct uwsgi_lock_item *uwsgi_lock_fast_init(char *id) {

	struct uwsgi_lock_item *item = uwsgi_register_lock(id, 0);
	umtx_init((struct umtx *) item->lock_ptr);
	return item;
}
// REMEMBER lock must contains space for both pthread_mutex_t and pthread_mutexattr_t !!! struct uwsgi_lock_item *uwsgi_lock_fast_init(char *id) { pthread_mutexattr_t attr; struct uwsgi_lock_item *uli = uwsgi_register_lock(id, 0); #ifdef EOWNERDEAD retry: #endif if (pthread_mutexattr_init(&attr)) { uwsgi_log("unable to allocate mutexattr structure\n"); exit(1); } if (pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED)) { uwsgi_log("unable to share mutex\n"); exit(1); } #ifdef EOWNERDEAD #ifndef PTHREAD_MUTEX_ROBUST #define PTHREAD_MUTEX_ROBUST PTHREAD_MUTEX_ROBUST_NP #define pthread_mutexattr_setrobust pthread_mutexattr_setrobust_np #define pthread_mutex_consistent pthread_mutex_consistent_np #endif if (uwsgi_pthread_robust_mutexes_enabled) { int ret; if ((ret = pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT)) != 0) { switch (ret) { case ENOTSUP: // PTHREAD_PRIO_INHERIT will only prevent // priority inversion when SCHED_FIFO or // SCHED_RR is used, so this is non-fatal and // also currently unsupported on musl. break; default: uwsgi_log("unable to set PTHREAD_PRIO_INHERIT\n"); exit(1); } } if (pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST)) { uwsgi_log("unable to make the mutex 'robust'\n"); exit(1); } } #endif if (pthread_mutex_init((pthread_mutex_t *) uli->lock_ptr, &attr)) { #ifdef EOWNERDEAD if (uwsgi_pthread_robust_mutexes_enabled) { uwsgi_log("!!! it looks like your kernel does not support pthread robust mutexes !!!\n"); uwsgi_log("!!! falling back to standard pthread mutexes !!!\n"); uwsgi_pthread_robust_mutexes_enabled = 0; pthread_mutexattr_destroy(&attr); goto retry; } #endif uwsgi_log("unable to initialize mutex\n"); exit(1); } pthread_mutexattr_destroy(&attr); #ifdef EOWNERDEAD if (!uwsgi_pthread_robust_mutexes_enabled) { uli->can_deadlock = 1; } #else uli->can_deadlock = 1; #endif return uli; }