Example #1
ZEND_API zval *_zend_ts_hash_index_add_or_update(TsHashTable *ht, zend_ulong h, zval *pData, int flag ZEND_FILE_LINE_DC)
{
	zval *retval;

	begin_write(ht);
	retval = _zend_hash_index_add_or_update(TS_HASH(ht), h, pData, flag ZEND_FILE_LINE_RELAY_CC);
	end_write(ht);

	return retval;
}
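
All of the zend_ts_hash wrappers in these examples follow one pattern: take the writer lock, run the plain zend_hash operation on the underlying table, release the lock. Below is a minimal sketch of what begin_write()/end_write() might look like, assuming the mx_reader/mx_writer TSRM mutexes that zend_ts_hash_destroy() (Example #12) frees; everything beyond those two field names is an assumption.

/* Sketch only: a writer takes both mutexes, so it excludes other
 * writers and all readers; a reader-side pair would take only
 * mx_reader for shared access. */
static void begin_write(TsHashTable *ht)
{
#ifdef ZTS
	tsrm_mutex_lock(ht->mx_writer);
	tsrm_mutex_lock(ht->mx_reader);
#endif
}

static void end_write(TsHashTable *ht)
{
#ifdef ZTS
	tsrm_mutex_unlock(ht->mx_reader);
	tsrm_mutex_unlock(ht->mx_writer);
#endif
}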
Example #2
ZEND_API int zend_ts_hash_del(TsHashTable *ht, zend_string *key)
{
	int retval;

	begin_write(ht);
	retval = zend_hash_del(TS_HASH(ht), key);
	end_write(ht);

	return retval;
}
Example #3
ZEND_API zval *zend_ts_hash_add_empty_element(TsHashTable *ht, zend_string *key)
{
	zval *retval;

	begin_write(ht);
	retval = zend_hash_add_empty_element(TS_HASH(ht), key);
	end_write(ht);

	return retval;
}
Example #4
ZEND_API void zend_ts_hash_apply_with_arguments(TsHashTable *ht, apply_func_args_t apply_func, int num_args, ...)
{
	va_list args;

	va_start(args, num_args);
	begin_write(ht);
	zend_hash_apply_with_arguments(TS_HASH(ht), apply_func, num_args, args);
	end_write(ht);
	va_end(args);
}
Example #5
ZEND_API int zend_ts_hash_rehash(TsHashTable *ht)
{
	int retval;

	begin_write(ht);
	retval = zend_hash_rehash(TS_HASH(ht));
	end_write(ht);

	return retval;
}
Example #6
ZEND_API int zend_ts_hash_index_del(TsHashTable *ht, zend_ulong h)
{
	int retval;

	begin_write(ht);
	retval = zend_hash_index_del(TS_HASH(ht), h);
	end_write(ht);

	return retval;
}
Example #7
ZEND_API int zend_ts_hash_sort(TsHashTable *ht, sort_func_t sort_func, compare_func_t compare_func, int renumber)
{
	int retval;

	begin_write(ht);
	retval = zend_hash_sort_ex(TS_HASH(ht), sort_func, compare_func, renumber);
	end_write(ht);

	return retval;
}
Example #8
ZEND_API int _zend_ts_hash_index_update_or_next_insert(TsHashTable *ht, ulong h, void *pData, uint nDataSize, void **pDest, int flag ZEND_FILE_LINE_DC)
{
	int retval;

	begin_write(ht);
	retval = _zend_hash_index_update_or_next_insert(TS_HASH(ht), h, pData, nDataSize, pDest, flag ZEND_FILE_LINE_RELAY_CC);
	end_write(ht);

	return retval;
}
Example #9
ZEND_API int zend_ts_hash_add_empty_element(TsHashTable *ht, char *arKey, uint nKeyLength)
{
	int retval;

	begin_write(ht);
	retval = zend_hash_add_empty_element(TS_HASH(ht), arKey, nKeyLength);
	end_write(ht);

	return retval;
}
Example #10
ZEND_API int _zend_ts_hash_add_or_update(TsHashTable *ht, char *arKey, uint nKeyLength, void *pData, uint nDataSize, void **pDest, int flag ZEND_FILE_LINE_DC)
{
	int retval;

	begin_write(ht);
	retval = _zend_hash_add_or_update(TS_HASH(ht), arKey, nKeyLength, pData, nDataSize, pDest, flag ZEND_FILE_LINE_RELAY_CC);
	end_write(ht);

	return retval;
}
Example #11
/* Join a group.  The group context is returned in *contextp.  
 */
hot_err_t hot_ens_Join(
    hot_ens_JoinOps_t *jops,
    hot_context_t *contextp	/*OUT*/ 
) {
    hot_err_t err = HOT_OK ;
    hot_context_t s ;

    /* Initialize global state if not done so already.
     */
    if (!g.initialized) {
	err = hot_ens_Init(jops->outboard, jops->argv); 
	if (err != HOT_OK)
	    return err;
    }

    begin_write(); {    
	begin_critical(); {    
	    /* Allocate a new group context 
	     * Initialize the group record.
	     */
	    s = alloc_context();
	    s->joining = 1 ;
	    s->leaving = 0 ;
	    s->conf = jops->conf;
	    s->env = jops->env;
	    s->view = NULL ;
	    *contextp = s ;
	} end_critical();
	
	/* Write the downcall.
	 */
  	write_hdr(s,DN_JOIN);
	write_int(jops->heartbeat_rate);
	write_string(jops->transports);
	write_string(jops->protocol);
	write_string(jops->group_name);
	write_string(jops->properties);
	write_bool(jops->use_properties);
	write_bool(jops->groupd);
	write_string(jops->params);
	write_bool(jops->client);
	write_bool(jops->debug);

	if (jops->endpt.name[0] != 0x0) {
	    hot_sys_Warning("HOT_OUTBOARD does not support 'endpt' in join ops") ;
	    jops->endpt.name[0] = 0x0;
	}
	write_string(jops->princ);
	write_string(jops->key);
	write_bool(jops->secure);

    } end_write();

    return HOT_OK;
}
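
The HOT/Ensemble examples (#11, #13-#15, #17, #21-#23, #25, #26) all use the begin_write(); { ... } end_write(); idiom: the braces are purely cosmetic scoping, and the pair brackets one marshalled downcall to the outboard Ensemble process. A minimal sketch of that pair, assuming a single process-wide mutex serializes downcalls; the mutex and the flush helper are hypothetical names, not the library's actual internals.

#include <pthread.h>

/* Hypothetical: one mutex so that at most one downcall at a time is
 * being marshalled into the outboard pipe. */
static pthread_mutex_t write_mutex = PTHREAD_MUTEX_INITIALIZER;

static void begin_write(void)
{
    pthread_mutex_lock(&write_mutex);
}

static void end_write(void)
{
    flush_write_buffer();	/* hypothetical helper: send the buffered record down the pipe */
    pthread_mutex_unlock(&write_mutex);
}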
Example #12
ZEND_API void zend_ts_hash_destroy(TsHashTable *ht)
{
	begin_write(ht);
	zend_hash_destroy(TS_HASH(ht));
	end_write(ht);

#ifdef ZTS
	tsrm_mutex_free(ht->mx_reader);
	tsrm_mutex_free(ht->mx_writer);
#endif
}
Example #13
/* Inform Ensemble that the state-transfer is complete. 
 */
hot_err_t hot_ens_XferDone(
    hot_context_t s
) {
    begin_write(); {
	begin_critical(); {
	    if (s->leaving) {
		hot_sys_Panic("hot_ens_XferDone: member is leaving") ;
	    }
	} end_critical();
	write_hdr(s,DN_XFERDONE);
    } end_write();
    return HOT_OK;
}
Example #14
/* Request a new view to be installed.
 */
hot_err_t hot_ens_RequestNewView(
    hot_context_t s
) {
    begin_write(); {
	begin_critical(); {
	    if (s->leaving) {
		hot_sys_Panic("hot_ens_RequestNewView: member is leaving") ;
	    }
	} end_critical();
	write_hdr(s,DN_PROMPT);
    } end_write();
    return HOT_OK;
}
Example #15
/* Request a Rekey operation.
 */
hot_err_t hot_ens_Rekey(
    hot_context_t s
) {
    begin_write(); {
	begin_critical(); {
	    if (s->leaving) {
		hot_sys_Panic("hot_ens_Rekey: member is leaving") ;
	    }
	} end_critical();
	write_hdr(s,DN_REKEY);
    } end_write();
    return HOT_OK;
}
Example #16
// Releases the reader_writer_lock
void reader_writer_lock::unlock() {
    if( my_current_writer!=tbb_thread::id() ) {
        // A writer owns the lock
        __TBB_ASSERT(is_current_writer(), "caller of reader_writer_lock::unlock() does not own the lock.");
        __TBB_ASSERT(writer_head, NULL);
        __TBB_ASSERT(writer_head->status==active, NULL);
        scoped_lock *a_writer_lock = writer_head;
        end_write(a_writer_lock);
        __TBB_ASSERT(a_writer_lock != writer_head, "Internal error: About to turn writer_head into dangling reference.");
        delete a_writer_lock;
    } else {
        end_read();
    }
}
Example #17
static void outb_atexit(void) {
  char eof = EOF;

  /* Send a kill signal to the child.
   */
  kill(cid, SIGKILL);
  waitpid(cid, NULL, 0);
  
  /* In case signal fails, write EOF to the pipe.
   */
  begin_write() ;
  do_write(&eof, sizeof(eof));	/* write the one-byte EOF sentinel */
  end_write() ;
}
Example #18
/* zodiac_spew - Takes a message type, an array of data words, and a length
 * for the array, and prepends a 5 word header (including checksum).
 * The data words are expected to be checksummed.
 */
static ssize_t zodiac_spew(struct gps_device_t *session, unsigned short type,
			   unsigned short *dat, int dlen)
{
    struct header h;
    int i;
    char buf[BUFSIZ];

    h.sync = 0x81ff;
    h.id = (unsigned short)type;
    h.ndata = (unsigned short)(dlen - 1);
    h.flags = 0;
    h.csum = zodiac_checksum((unsigned short *)&h, 4);

    if (!BAD_SOCKET(session->gpsdata.gps_fd)) {
	size_t hlen, datlen;
	hlen = sizeof(h);
	datlen = sizeof(unsigned short) * dlen;
	if (end_write(session->gpsdata.gps_fd, &h, hlen) != (ssize_t) hlen ||
	    end_write(session->gpsdata.gps_fd, dat,
		      datlen) != (ssize_t) datlen) {
	    gpsd_log(&session->context->errout, LOG_RAW,
		     "Reconfigure write failed\n");
	    return -1;
	}
    }

    (void)snprintf(buf, sizeof(buf),
		   "%04x %04x %04x %04x %04x",
		   h.sync, h.id, h.ndata, h.flags, h.csum);
    for (i = 0; i < dlen; i++)
	str_appendf(buf, sizeof(buf), " %04x", dat[i]);

    gpsd_log(&session->context->errout, LOG_RAW,
	     "Sent Zodiac packet: %s\n", buf);

    return 0;
}
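
Note that end_write() in Example #18 is not the lock-release half of a begin_write()/end_write() pair; it is used as a plain write helper on a file descriptor. A plausible sketch follows, assuming Zodiac packets are little-endian 16-bit words so a big-endian host must byte-swap before write(2); WORDS_BIGENDIAN is the usual autoconf macro, and the body itself is an assumption.

#include <stdio.h>
#include <unistd.h>

static ssize_t end_write(int fd, void *d, size_t len)
{
#ifdef WORDS_BIGENDIAN
    char buf[BUFSIZ];			/* assumes len <= BUFSIZ, as in the caller */
    size_t n;
    for (n = 0; n < len; n += 2) {	/* swap each 16-bit word */
	buf[n] = ((char *)d)[n + 1];
	buf[n + 1] = ((char *)d)[n];
    }
    return write(fd, buf, len);
#else
    return write(fd, d, len);
#endif
}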
Example #19
struct linked_list* list_remove(struct linked_list_head *list, int val) {
  struct linked_list **p, *ret=NULL;
  begin_write(list->sync);
  p=&list->head;
  while (*p) {
    if ((*p)->nb==val) {
      ret=*p;
      *p=(*p)->next;
      break;
    }
    p=&(*p)->next;
  }
  end_write(list->sync);
  return ret;
}
Example #20
struct linked_list* remove_element(struct linked_list_head *list, int val) {
	struct linked_list **p, *ret = NULL;
	begin_write(&list->sync);
	sleep(rand()%3 + 1);
	p = &list->head;
	while((*p) != NULL) {
		if ((*p)->nb == val) {
			ret = *p;
			*p = (*p)->next;
			break;
		}
		p = &(*p)->next;
	}
	end_write(&list->sync);
	return ret;
}
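
Examples #19, #20, and #24 guard a singly linked list with begin_write()/end_write() around the traversal. A minimal sketch of that pair over a POSIX rwlock, assuming struct linked_list_head carries a pthread_rwlock_t named sync (Example #19 passes list->sync without the address-of operator, so sync would be a pointer in that variant; lookups would use an analogous begin_read() built on pthread_rwlock_rdlock()).

#include <pthread.h>

struct linked_list {
	int nb;
	struct linked_list *next;
};

struct linked_list_head {
	struct linked_list *head;
	pthread_rwlock_t sync;		/* assumed: set up once with pthread_rwlock_init() */
};

static void begin_write(pthread_rwlock_t *sync)
{
	pthread_rwlock_wrlock(sync);	/* exclusive: blocks readers and other writers */
}

static void end_write(pthread_rwlock_t *sync)
{
	pthread_rwlock_unlock(sync);
}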
Example #21
/* Report group members as failure-suspected.
 * 
 * NB:  In the initial implementation, this downcall will not be supported.
 *      (if invoked, an exeption will be raised by OCAML).
 */
hot_err_t hot_ens_Suspect(
    hot_context_t s,
    hot_endpt_t *suspects, 
    int nsuspects
) {
    begin_write(); {
	begin_critical(); {
	    if (s->leaving) {
		hot_sys_Panic("hot_ens_Suspect: member is leaving") ;
	    }
	} end_critical();
	write_hdr(s,DN_SUSPECT);
	write_endpList(nsuspects, suspects);
    } end_write();
    return HOT_OK;
}
Example #22
/* Request a protocol change.
 */
hot_err_t hot_ens_ChangeProtocol(
    hot_context_t s,
    char *protocol
) {
    assert(protocol);
    begin_write(); {
	begin_critical(); {
	    if (s->leaving) {
		hot_sys_Panic("hot_ens_ChangeProtocol: member is leaving") ;
	    }
	} end_critical();
	write_hdr(s,DN_PROTOCOL);
	write_string(protocol); 
    } end_write();
    return HOT_OK;
}
Example #23
/* Request a protocol change specifying properties.
 */
hot_err_t hot_ens_ChangeProperties(
    hot_context_t s,
    char *properties
) {
    assert(properties) ;
    begin_write(); {
	begin_critical(); {
	    if (s->leaving) {
		hot_sys_Panic("hot_ens_ChangeProperties: member is leaving") ;
	    }
	} end_critical();
	write_hdr(s,DN_PROPERTIES);
	write_string(properties); 
    } end_write();
    return HOT_OK;
}
Example #24
void add_element(struct linked_list_head *list, struct linked_list *l) {
	struct linked_list **p, **prec;
	begin_write(&list->sync);
	sleep(rand()%3 + 1);
	prec = NULL;
	p = &list->head;
	while((*p) != NULL) {
		prec = p;
		p = &(*p)->next;
	}
	if (prec != NULL) {
		(*prec)->next = l;
	} else {
		list->head = l;
	}
	end_write(&list->sync);
}
Example #25
/* Leave a group.  This should be the last call made to a given context.
 * No more events will be delivered to this context after the call
 * returns.  
 */
hot_err_t hot_ens_Leave(
    hot_context_t s
) {
    begin_write(); {
	begin_critical(); {
	    if (s->leaving) {
		hot_sys_Panic("hot_ens_Leave:this member is already leaving") ;
	    }
	    s->leaving = 1 ;
	} end_critical();
	    
	/* Write the downcall.
	 */
	write_hdr(s,DN_LEAVE);
    } end_write();
    
    return HOT_OK;
}
Example #26
HOT_INLINE
hot_err_t hot_ens_Send(
    hot_context_t s, 
    hot_endpt_t *dest,
    hot_msg_t msg,
    hot_ens_MsgSendView *send_view /*OUT*/
) {
    begin_write(); { 
	begin_critical(); {
	    if (s->leaving) {
		hot_sys_Panic("hot_ens_Send: member is leaving") ;
	    }

	    if (send_view != NULL) {
		*send_view = (s->blocked) ?
		    HOT_ENS_MSG_SEND_NEXT_VIEW : HOT_ENS_MSG_SEND_CURRENT_VIEW ;
	    }
	} end_critical() ;

	if (s->blocked) {
	    write_hdr(s,DN_SEND_BLOCKED);
	    write_endpID(dest);
	    write_msg(msg);
	} else {
 	    unsigned i ;
	    begin_critical(); {
		for (i=0;i<s->nmembers;i++) {
		    if (!strcmp(s->view[i].name,dest->name))
			break ;
		}
		assert(i<s->nmembers) ;
	    } end_critical() ;
	    write_hdr(s,DN_SEND);
	    write_int(i);
	    write_msg(msg);
	}
    } end_write();
    return HOT_OK;
}
Example #27
ZEND_API void zend_ts_hash_apply(TsHashTable *ht, apply_func_t apply_func TSRMLS_DC)
{
	begin_write(ht);
	zend_hash_apply(TS_HASH(ht), apply_func TSRMLS_CC);
	end_write(ht);
}
Example #28
/**
 * Destroys the thread pool.
 *
 * 1. Lock the destruction lock (top priority... don't want to starve here)
 * 2. Change the state
 * 3. Unlock the destruction lock (now, no more tasks can be added because the state
 *	  is checked first, and the threads should know to check the state before dequeuing
 *    anything)
 * 4. Lock the tasks lock, so we're sure all threads are waiting for a signal (or for
 *    the lock) to prevent deadlock (see comments below).
 * 5. Broadcast to all threads waiting for tasks: if there's nothing now, there never
 *    will be!
 * 6. Unlock the tasks lock (to allow the threads to do their thing)
 * 7. Wait for all threads to finish
 * 8. Destroy all fields of the pool
 */
void tpDestroy(ThreadPool* tp, int should_wait_for_tasks) {
	
	if (!tp)												// Sanity check
		return;
	
	if (!start_write(tp)) {									// Someone is already writing to pool->state!
		return;												// This should only happen ONCE...
	}
	if (tp->state != ALIVE) {								// Destruction already in progress.
		end_write(tp);										// This can happen if destroy() is called twice fast
		return;
	}
	tp->state = should_wait_for_tasks ? DO_ALL : DO_RUN;	// Enter destroy mode
	PRINT("Destroying the pool! Now allowing state read.\n");
	end_write(tp);											// Allow reading the state

	// Make sure the queue isn't busy.
	// We shouldn't encounter deadlock/livelock here because threads wait for signals, and the only
	// possible source of a signal is here or in tpInsertTask().
	// If we lock this only AFTER we change the destruction state, we can create a situation where
	// a thread waits forever for a signal that will never come:
	// - A thread is in its while loop, the queue is empty and the pool isn't being
	//   destroyed, so it enters the body of the loop. Before it starts waiting for
	//   a signal...
	// - CS-->Main thread
	// - Main thread calls destroy, doesn't wait for the task_lock and writes the
	//   new destruction state. Then, broadcasts a signal to listening threads.
	// - CS-->Previous thread
	// - Starts waiting for a signal that will never come
	// By locking here before broadcasting the destruction, we make sure all threads are waiting for
	// the lock or for the signal! Either way, after the signal, the threads will know what to do and
	// exit the loop.
	pthread_mutex_lock(&tp->task_lock);
	PRINT("Pool destroyed, task lock locked, sending broadcast...\n");
	pthread_cond_broadcast(&tp->queue_not_empty_or_dying);
	PRINT("... done. Unlocking the task lock.\n");
	pthread_mutex_unlock(&tp->task_lock);
	
	// Wait for all threads to exit (this could take a while)
	int i;
	for (i=0; i<tp->N; ++i) {
#if HW3_DEBUG
		PRINT("Waiting for T%2d to finish (tid=%d)...\n", i+1, tp->tids[i]);
#else
		PRINT("Waiting for thread %d to finish...\n", i+1);
#endif
		pthread_join(tp->threads[i], NULL);
#if HW3_DEBUG
		PRINT("T%2d (tid=%d) done.\n", i+1, tp->tids[i]);
#else
		PRINT("Thread %d done.\n", i+1);
#endif
	}
	PRINT("All threads done! Locking the task lock and destroying the task queue...\n");
	
	// Cleanup!
	// Tasks (we can still lock here):
	pthread_mutex_lock(&tp->task_lock);
	while (!osIsQueueEmpty(tp->tasks)) {
		Task* t = (Task*)osDequeue(tp->tasks);
		free(t);
	}
	osDestroyQueue(tp->tasks);
	PRINT("Done. Unlocking the task queue\n");
	pthread_mutex_unlock(&tp->task_lock);
	
	// Locks:
	PRINT("Doing thread pool cleanup...\n");
	pthread_mutex_destroy(&tp->task_lock);
	pthread_mutexattr_destroy(&tp->mutex_type);
	sem_destroy(&tp->r_num_mutex);
	sem_destroy(&tp->w_flag_mutex);
	sem_destroy(&tp->r_entry);
	sem_destroy(&tp->r_try);
	sem_destroy(&tp->state_lock);
	pthread_cond_destroy(&tp->queue_not_empty_or_dying);
	
	// Last cleanup, and out:
#if HW3_DEBUG
	free(tp->tids);
#endif
	free(tp->threads);
	free(tp);
	PRINT("Done destroying thread pool.\n");
	return;
}
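
tpDestroy() relies on start_write()/end_write() to serialize writers of tp->state, and the cleanup above frees five semaphores (r_num_mutex, w_flag_mutex, r_entry, r_try, state_lock) that suggest the classic writer-preference readers-writer scheme. Here is a sketch of the writer side under that assumption; the w_flag field is hypothetical, and readers would enter through r_entry/r_try and count themselves under r_num_mutex.

#include <semaphore.h>

/* Sketch only: one active writer at a time, writer preference.
 * Returns 0 when a writer is already active (the "Someone is already
 * writing" case checked in tpDestroy), 1 on success. */
static int start_write(ThreadPool* tp) {
	sem_wait(&tp->w_flag_mutex);
	if (tp->w_flag) {			/* hypothetical flag: a writer holds the state */
		sem_post(&tp->w_flag_mutex);
		return 0;
	}
	tp->w_flag = 1;
	sem_post(&tp->w_flag_mutex);
	sem_wait(&tp->r_try);			/* keep new readers out */
	sem_wait(&tp->state_lock);		/* wait until active readers drain */
	return 1;
}

static void end_write(ThreadPool* tp) {
	sem_post(&tp->state_lock);
	sem_post(&tp->r_try);			/* readers may enter again */
	sem_wait(&tp->w_flag_mutex);
	tp->w_flag = 0;
	sem_post(&tp->w_flag_mutex);
}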
Example #29
ZEND_API void zend_ts_hash_reverse_apply(TsHashTable *ht, apply_func_t apply_func)
{
	begin_write(ht);
	zend_hash_reverse_apply(TS_HASH(ht), apply_func);
	end_write(ht);
}
Example #30
ZEND_API void zend_ts_hash_apply_with_argument(TsHashTable *ht, apply_func_arg_t apply_func, void *argument)
{
	begin_write(ht);
	zend_hash_apply_with_argument(TS_HASH(ht), apply_func, argument);
	end_write(ht);
}