Example #1
int pthread_cond_wait(pthread_cond_t *cv, pthread_mutex_t *mut)
{
	return cond_wait(cv, mut, K_FOREVER);
}
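This shim simply forwards pthread_cond_wait() to a native cond_wait() with an infinite timeout (K_FOREVER). As a reminder of how callers are expected to drive it, here is a minimal, self-contained sketch of the standard predicate-loop pattern; the names (lock, ready_cv, ready) are illustrative and do not come from any of the examples on this page.

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t ready_cv = PTHREAD_COND_INITIALIZER;
static int ready;			/* predicate protected by 'lock' */

void wait_until_ready(void)
{
	pthread_mutex_lock(&lock);
	while (!ready)			/* re-check: wakeups may be spurious */
		pthread_cond_wait(&ready_cv, &lock);
	pthread_mutex_unlock(&lock);
}

void mark_ready(void)
{
	pthread_mutex_lock(&lock);
	ready = 1;			/* change the predicate under the lock */
	pthread_cond_signal(&ready_cv);
	pthread_mutex_unlock(&lock);
}

The while loop matters in every example below: cond_wait() and its wrappers may wake without the condition being true, so the predicate is always re-checked under the mutex.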
Example #2
static bool_t
clnt_vc_control(
	CLIENT *cl,
	u_int request,
	char *info
)
{
	struct ct_data *ct;
	void *infop = info;
#ifdef _REENTRANT
	sigset_t mask;
#endif
	sigset_t newmask;

	_DIAGASSERT(cl != NULL);

	ct = (struct ct_data *)cl->cl_private;

	__clnt_sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
#ifdef _REENTRANT
	while (vc_fd_locks[ct->ct_fd])
		cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
	vc_fd_locks[ct->ct_fd] = __rpc_lock_value;
#endif
	mutex_unlock(&clnt_fd_lock);

	switch (request) {
	case CLSET_FD_CLOSE:
		ct->ct_closeit = TRUE;
		release_fd_lock(ct->ct_fd, mask);
		return (TRUE);
	case CLSET_FD_NCLOSE:
		ct->ct_closeit = FALSE;
		release_fd_lock(ct->ct_fd, mask);
		return (TRUE);
	default:
		break;
	}

	/* for other requests which use info */
	if (info == NULL) {
		release_fd_lock(ct->ct_fd, mask);
		return (FALSE);
	}
	switch (request) {
	case CLSET_TIMEOUT:
		if (time_not_ok((struct timeval *)(void *)info)) {
			release_fd_lock(ct->ct_fd, mask);
			return (FALSE);
		}
		ct->ct_wait = *(struct timeval *)infop;
		ct->ct_waitset = TRUE;
		break;
	case CLGET_TIMEOUT:
		*(struct timeval *)infop = ct->ct_wait;
		break;
	case CLGET_SERVER_ADDR:
		(void) memcpy(info, ct->ct_addr.buf, (size_t)ct->ct_addr.len);
		break;
	case CLGET_FD:
		*(int *)(void *)info = ct->ct_fd;
		break;
	case CLGET_SVC_ADDR:
		/* The caller should not free this memory area */
		*(struct netbuf *)(void *)info = ct->ct_addr;
		break;
	case CLSET_SVC_ADDR:		/* set to new address */
		release_fd_lock(ct->ct_fd, mask);
		return (FALSE);
	case CLGET_XID:
		/*
		 * use the knowledge that xid is the
		 * first element in the call structure
		 * This will get the xid of the PREVIOUS call
		 */
		ntohlp(info, &ct->ct_u.ct_mcalli);
		break;
	case CLSET_XID:
		/* This will set the xid of the NEXT call */
		/* increment by 1 as clnt_vc_call() decrements once */
		htonlp(&ct->ct_u.ct_mcalli, info, 1);
		break;
	case CLGET_VERS:
		/*
		 * This RELIES on the information that, in the call body,
		 * the version number field is the fifth field from the
		 * beginning of the RPC header. MUST be changed if the
		 * call_struct is changed
		 */
		ntohlp(info, ct->ct_u.ct_mcallc + 4 * BYTES_PER_XDR_UNIT);
		break;

	case CLSET_VERS:
		htonlp(ct->ct_u.ct_mcallc + 4 * BYTES_PER_XDR_UNIT, info, 0);
		break;

	case CLGET_PROG:
		/*
		 * This RELIES on the information that, in the call body,
		 * the program number field is the fourth field from the
		 * beginning of the RPC header. MUST be changed if the
		 * call_struct is changed
		 */
		ntohlp(info, ct->ct_u.ct_mcallc + 3 * BYTES_PER_XDR_UNIT);
		break;

	case CLSET_PROG:
		htonlp(ct->ct_u.ct_mcallc + 3 * BYTES_PER_XDR_UNIT, info, 0);
		break;

	default:
		release_fd_lock(ct->ct_fd, mask);
		return (FALSE);
	}
	release_fd_lock(ct->ct_fd, mask);
	return (TRUE);
}
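clnt_vc_control() above (and clnt_vc_call() in Example #26) leans on release_fd_lock(), which is defined elsewhere in the same source file and is not reproduced here. As a rough illustration only of the per-descriptor locking scheme, it plausibly clears the descriptor's lock flag, restores the caller's signal mask, and wakes a thread blocked in the cond_wait() loop; the real definition may be a macro and may differ in detail.

/* Hypothetical sketch of release_fd_lock(); not the library's code. */
static void
release_fd_lock(int fd, sigset_t mask)
{
	mutex_lock(&clnt_fd_lock);
	vc_fd_locks[fd] = 0;		/* give up ownership of this fd */
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &mask, NULL);	/* restore signals */
	cond_signal(&vc_cv[fd]);	/* wake one waiter in the while loop */
}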
Example #3
/*
 * -----------------------------------------------------------------
 *			fscache_server_alive
 *
 * Description:	waits for the back file system server to respond:
 *		sleeps out simulated disconnects, pings the server,
 *		remounts the back file system if necessary, and
 *		checks the root fid and its attributes before returning.
 * Arguments:
 *	fscache_object_p	fscache object being monitored
 *	kmod_object_p		kmod object used to reach the kernel module
 * Returns:	nothing
 * Preconditions:
 *	precond(fscache_object_p)
 *	precond(kmod_object_p)
 */
void
fscache_server_alive(cfsd_fscache_object_t *fscache_object_p,
	cfsd_kmod_object_t *kmod_object_p)
{

	int xx;
	cfs_fid_t rootfid;
	dl_cred_t cr;
	cfs_vattr_t va;
	char cfsopt[CFS_MAXMNTOPTLEN];
	int child_pid;
	int stat_loc;

	dbug_enter("fscache_server_alive");

	dbug_precond(fscache_object_p);
	dbug_precond(kmod_object_p);

	for (;;) {
		/* wait for a little while */
		if (fscache_object_p->i_simdis == 0)
			cfsd_sleep(30);
		/* if simulating disconnect */
		fscache_lock(fscache_object_p);
		while (fscache_object_p->i_simdis &&
			!fscache_object_p->i_tryunmount) {
			dbug_print(("simdis", "before calling cond_wait"));
			xx = cond_wait(&fscache_object_p->i_cvwait,
			    &fscache_object_p->i_lock);
			dbug_print(("simdis", "cond_wait woke up %d %d",
			    xx, fscache_object_p->i_simdis));
		}
		fscache_unlock(fscache_object_p);

		if (fscache_object_p->i_tryunmount)
			break;

		/* see if the server is alive */
		if (fscache_pingserver(fscache_object_p) == -1) {
			/* dead server */
			continue;
		}

		/* try to mount the back file system if needed */
		if (fscache_object_p->i_backpath[0] == '\0') {
			dbug_precond(fscache_object_p->i_cfsopt[0]);
			dbug_precond(fscache_object_p->i_backfs[0]);
			dbug_precond(fscache_object_p->i_mntpt[0]);

			snprintf(cfsopt, sizeof (cfsopt), "%s,slide,remount",
			    fscache_object_p->i_cfsopt);
			/*
			 * Mounting of a cachefs file system is done by calling
			 * out to /usr/lib/fs/cachefs/mount so that mounts
			 * done by the user, autofs and by us here in cachefsd
			 * are consistent.
			 */
			switch ((child_pid = fork1())) {
			case -1:
				/*
				 * The original code used system()
				 * but never checked for an error
				 * occurring. The rest of the code
				 * would suggest that "continue" is
				 * the correct thing to do.
				 */
				dbug_print(("info", "unable to fork mount "
				    "process for back fs %s %d",
				    fscache_object_p->i_backfs, errno));
				continue;
			case 0:
				(void) setsid();
				execl("/usr/sbin/mount", "mount", "-F",
				    "cachefs", "-o", cfsopt,
				    fscache_object_p->i_backfs,
				    fscache_object_p->i_mntpt, NULL);
				break;
			default:
				(void) waitpid(child_pid, &stat_loc, WUNTRACED);
			}

		}

		/* get the root fid of the file system */
		xx = kmod_rootfid(kmod_object_p, &rootfid);
		if (xx) {
			dbug_print(("info", "could not mount back fs %s %d",
			    fscache_object_p->i_backfs, xx));
			continue;
		}

		/* dummy up a fake kcred */
		(void) memset(&cr, 0, sizeof (cr));

		/* try to get attrs on the root */
		xx = kmod_getattrfid(kmod_object_p, &rootfid, &cr, &va);
		if ((xx == ETIMEDOUT) || (xx == EIO)) {
			dbug_print(("info", "Bogus error %d", xx));
			continue;
		}
		break;
	}
	dbug_leave("fscache_server_alive");
}
Example #4
/*
 *	load_media - load media into a drive
 *
 */
req_comp_t
load_media(
	library_t *library,
	drive_state_t *drive,
	struct CatalogEntry *ce,
	ushort_t category)
{
	req_comp_t 	err;
	xport_state_t 	*transport;
	robo_event_t 	*load, *tmp;
	ibm_req_info_t 	*ibm_info;

	ibm_info = malloc_wait(sizeof (ibm_req_info_t), 2, 0);
	memset(ibm_info, 0, sizeof (ibm_req_info_t));
	ibm_info->drive_id = drive->drive_id;
	ibm_info->src_cat = category;

	if (ce != NULL)
		memcpy((void *)&drive->bar_code, ce->CeBarCode, 8);
	else {
		memset((void *)&drive->bar_code, 0, 8);
		memset((void *)&drive->bar_code, ' ', 6);
	}

	sprintf((void *)&ibm_info->volser, "%-8.8s", drive->bar_code);

	/* Build transport thread request */
	load = malloc_wait(sizeof (robo_event_t), 5, 0);
	(void) memset(load, 0, sizeof (robo_event_t));
	load->request.internal.command = ROBOT_INTRL_LOAD_MEDIA;
	load->request.internal.address = (void *)ibm_info;

	if (DBG_LVL(SAM_DBG_TMOVE))
		sam_syslog(LOG_DEBUG,
		    "load_media(%d): from %s to %s.", LIBEQ, drive->bar_code,
		    drive->un->name);

	load->type = EVENT_TYPE_INTERNAL;
	load->status.bits = REST_SIGNAL;
	load->completion = REQUEST_NOT_COMPLETE;

	transport = library->transports;
	mutex_lock(&load->mutex);
	mutex_lock(&transport->list_mutex);
	if (transport->active_count == 0)
		transport->first = load;
	else {
		LISTEND(transport, tmp);
		append_list(tmp, load);
	}
	transport->active_count++;
	cond_signal(&transport->list_condit);
	mutex_unlock(&transport->list_mutex);

	/* Wait for the transport to do the load */
	while (load->completion == REQUEST_NOT_COMPLETE)
		cond_wait(&load->condit, &load->mutex);
	mutex_unlock(&load->mutex);

	err = (req_comp_t)load->completion;
	if (DBG_LVL(SAM_DBG_TMOVE))
		sam_syslog(LOG_DEBUG,
		    "Return from transport load (%#x).", load->completion);

	free(ibm_info);
	mutex_destroy(&load->mutex);
	free(load);
	return (err);
}
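load_media() and its siblings query_library() (Example #5), view_media() (Example #23) and set_media_category() (Example #25) all follow the same hand-off protocol: queue a robo_event_t on the transport thread's list under list_mutex, cond_signal the transport, then cond_wait on the event's own condition variable until the transport marks it complete. Stripped of the IBM-specific payload and rendered with POSIX threads, the requester side looks roughly like the sketch below; the struct and field names are illustrative stand-ins, not the project's types.

#include <pthread.h>

struct request {			/* stand-in for robo_event_t */
	pthread_mutex_t	mutex;
	pthread_cond_t	condit;
	int		completion;	/* REQUEST_NOT_COMPLETE until done */
	struct request	*next;
};

struct worker {				/* stand-in for xport_state_t */
	pthread_mutex_t	list_mutex;
	pthread_cond_t	list_condit;
	struct request	*first;
	int		active_count;
};

enum { REQUEST_NOT_COMPLETE = -1 };

/* Assumes both structs' mutexes and condition variables were already
 * initialized with pthread_mutex_init/pthread_cond_init. */
int
submit_and_wait(struct worker *w, struct request *req)
{
	req->completion = REQUEST_NOT_COMPLETE;

	pthread_mutex_lock(&req->mutex);	/* held before queueing, as above */
	pthread_mutex_lock(&w->list_mutex);
	req->next = w->first;			/* simplified; the real code appends at the tail */
	w->first = req;
	w->active_count++;
	pthread_cond_signal(&w->list_condit);	/* wake the worker thread */
	pthread_mutex_unlock(&w->list_mutex);

	while (req->completion == REQUEST_NOT_COMPLETE)
		pthread_cond_wait(&req->condit, &req->mutex);
	pthread_mutex_unlock(&req->mutex);

	return (req->completion);
}

The worker half of this sketch would pop a request off the list, perform it, then lock req->mutex, set req->completion, and pthread_cond_signal(&req->condit).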
Example #5
/*
 *	query_library - send the supplied query to the library.
 */
req_comp_t
query_library(
	library_t *library,
	int seqno,		/* starting-1 sequence number */
	int sub_cmd,		/* what query */
	void **ret_data,	/* return data */
	ushort_t category)
{
	req_comp_t 	err;
	ibm_req_info_t 	*ibm_info;
	xport_state_t 	*transport;
	robo_event_t 	*view, *tmp;

	ibm_info = malloc_wait(sizeof (ibm_req_info_t), 2, 0);
	memset(ibm_info, 0, sizeof (ibm_req_info_t));
	ibm_info->sub_cmd = sub_cmd;
	ibm_info->src_cat = category;
	ibm_info->seqno = seqno;
	memset(&ibm_info->volser[0], ' ', 8);

	/* Build transport thread request */
	view = malloc_wait(sizeof (robo_event_t), 5, 0);
	(void) memset(view, 0, sizeof (robo_event_t));
	view->request.internal.command = ROBOT_INTRL_QUERY_LIBRARY;
	view->request.internal.address = (void *)ibm_info;

	if (DBG_LVL(SAM_DBG_TMOVE))
		sam_syslog(LOG_DEBUG, "query_library: %#x.", sub_cmd);

	view->type = EVENT_TYPE_INTERNAL;
	view->status.bits = REST_SIGNAL;
	view->completion = REQUEST_NOT_COMPLETE;
	transport = library->transports;

	mutex_lock(&view->mutex);
	mutex_lock(&transport->list_mutex);
	if (transport->active_count == 0)
		transport->first = view;
	else {
		LISTEND(transport, tmp);
		append_list(tmp, view);
	}
	transport->active_count++;
	cond_signal(&transport->list_condit);
	mutex_unlock(&transport->list_mutex);

	/* Wait for the transport to do the request */
	while (view->completion == REQUEST_NOT_COMPLETE)
		cond_wait(&view->condit, &view->mutex);
	mutex_unlock(&view->mutex);

	err = (req_comp_t)view->completion;
	if (DBG_LVL(SAM_DBG_TMOVE))
		sam_syslog(LOG_DEBUG, "Return from query_library (%#x).",
		    view->completion);
	if (!err) {
		*ret_data = malloc_wait(sizeof (IBM_query_info_t), 2, 0);
		memcpy(*ret_data, ibm_info->ret_data,
		    sizeof (IBM_query_info_t));
	} else
		*ret_data = NULL;

	free(ibm_info);
	mutex_destroy(&view->mutex);
	free(view);
	return (err);
}
Example #6
static void *
gtick(void *arg)
{
	struct tstate		*state = arg;
	char			*errstr;
	uint_t			nsamples;
	uint_t			sample_cnt = 1;
	hrtime_t		ht, htdelta, restdelta;
	cpc_setgrp_t		*sgrp = state->sgrp;
	cpc_set_t		*this = cpc_setgrp_getset(sgrp);
	const char		*name = cpc_setgrp_getname(sgrp);
	cpc_buf_t		**data1, **data2, **scratch;
	cpc_buf_t		*tmp;
	int			nreqs;
	thread_t		tid;

	htdelta = NSECS_PER_MSEC * opts->mseconds;
	restdelta = NSECS_PER_MSEC * opts->mseconds_rest;
	ht = gethrtime();

	/*
	 * If this CPU is SMT, we run one gtick() thread per _physical_ CPU,
	 * instead of per cpu_t. The following check returns if it detects that
	 * this cpu_t has not been designated to do the counting for this
	 * physical CPU.
	 */
	if (smt && chip_designees[state->chip_id] != state->cpuid)
		return (NULL);

	/*
	 * If we need to run a soaker thread on this CPU, start it here.
	 */
	if (opts->dosoaker) {
		if (cond_init(&state->soak_cv, USYNC_THREAD, NULL) != 0)
			goto bad;
		if (mutex_init(&state->soak_lock, USYNC_THREAD,
		    NULL) != 0)
			goto bad;
		(void) mutex_lock(&state->soak_lock);
		state->soak_state = SOAK_PAUSE;
		if (thr_create(NULL, 0, soaker, state, NULL, &tid) != 0)
			goto bad;

		while (state->soak_state == SOAK_PAUSE)
			(void) cond_wait(&state->soak_cv,
			    &state->soak_lock);
		(void) mutex_unlock(&state->soak_lock);

		/*
		 * If the soaker needs to pause for the first set, stop it now.
		 */
		if (cpc_setgrp_sysonly(sgrp) == 0) {
			(void) mutex_lock(&state->soak_lock);
			state->soak_state = SOAK_PAUSE;
			(void) mutex_unlock(&state->soak_lock);
		}
	}
	if (cpc_bind_cpu(cpc, state->cpuid, this, 0) == -1)
		goto bad;

	for (nsamples = opts->nsamples; nsamples; nsamples--, sample_cnt++) {
		hrtime_t htnow;
		struct timespec ts;

		nreqs = cpc_setgrp_getbufs(sgrp, &data1, &data2, &scratch);

		ht += htdelta;
		htnow = gethrtime();
		if (ht <= htnow)
			continue;
		ts.tv_sec = (time_t)((ht - htnow) / NSECS_PER_SEC);
		ts.tv_nsec = (suseconds_t)((ht - htnow) % NSECS_PER_SEC);

		(void) nanosleep(&ts, NULL);

		if (opts->nsets == 1) {
			/*
			 * If we're dealing with one set, buffer usage is:
			 *
			 * data1 = most recent data snapshot
			 * data2 = previous data snapshot
			 * scratch = used for diffing data1 and data2
			 *
			 * Save the snapshot from the previous sample in data2
			 * before putting the current sample in data1.
			 */
			tmp = *data1;
			*data1 = *data2;
			*data2 = tmp;
			if (cpc_set_sample(cpc, this, *data1) != 0)
				goto bad;
			cpc_buf_sub(cpc, *scratch, *data1, *data2);

			print_sample(state->cpuid, *scratch, nreqs, name, 0);
		} else {
			/*
			 * More than one set is in use (multiple -c options
			 * given). Buffer usage in this case is:
			 *
			 * data1 = total counts for this set since program began
			 * data2 = unused
			 * scratch = most recent data snapshot
			 */
			name = cpc_setgrp_getname(sgrp);
			nreqs = cpc_setgrp_getbufs(sgrp, &data1, &data2,
			    &scratch);

			if (cpc_set_sample(cpc, this, *scratch) != 0)
				goto bad;

			cpc_buf_add(cpc, *data1, *data1, *scratch);

			if (cpc_unbind(cpc, this) != 0)
				(void) fprintf(stderr, gettext("%s: error "
				    "unbinding on cpu %d - %s\n"),
				    opts->pgmname, state->cpuid,
				    strerror(errno));

			this = cpc_setgrp_nextset(sgrp);

			print_sample(state->cpuid, *scratch, nreqs, name, 0);

			/*
			 * If periodic behavior was requested, rest here.
			 */
			if (opts->doperiod && opts->mseconds_rest > 0 &&
			    (sample_cnt % opts->nsets) == 0) {
				/*
				 * Stop the soaker while the tool rests.
				 */
				if (opts->dosoaker) {
					(void) mutex_lock(&state->soak_lock);
					if (state->soak_state == SOAK_RUN)
						state->soak_state = SOAK_PAUSE;
					(void) mutex_unlock(&state->soak_lock);
				}

				htnow = gethrtime();
				ht += restdelta;
				ts.tv_sec = (time_t)((ht - htnow) /
				    NSECS_PER_SEC);
				ts.tv_nsec = (suseconds_t)((ht - htnow) %
				    NSECS_PER_SEC);

				(void) nanosleep(&ts, NULL);
			}

			/*
			 * Start or stop the soaker if needed.
			 */
			if (opts->dosoaker) {
				(void) mutex_lock(&state->soak_lock);
				if (cpc_setgrp_sysonly(sgrp) &&
				    state->soak_state == SOAK_PAUSE) {
					/*
					 * Soaker is paused but the next set is
					 * sysonly: start the soaker.
					 */
					state->soak_state = SOAK_RUN;
					(void) cond_signal(&state->soak_cv);
				} else if (cpc_setgrp_sysonly(sgrp) == 0 &&
				    state->soak_state == SOAK_RUN)
					/*
					 * Soaker is running but the next set
					 * counts user events: stop the soaker.
					 */
					state->soak_state = SOAK_PAUSE;
				(void) mutex_unlock(&state->soak_lock);
			}

			if (cpc_bind_cpu(cpc, state->cpuid, this, 0) != 0)
				goto bad;
		}
	}

	if (cpc_unbind(cpc, this) != 0)
		(void) fprintf(stderr, gettext("%s: error unbinding on"
		    " cpu %d - %s\n"), opts->pgmname,
		    state->cpuid, strerror(errno));

	/*
	 * We're done, so stop the soaker if needed.
	 */
	if (opts->dosoaker) {
		(void) mutex_lock(&state->soak_lock);
		if (state->soak_state == SOAK_RUN)
			state->soak_state = SOAK_PAUSE;
		(void) mutex_unlock(&state->soak_lock);
	}

	return (NULL);
bad:
	state->status = 3;
	errstr = strerror(errno);
	(void) fprintf(stderr, gettext("%s: cpu%d - %s\n"),
	    opts->pgmname, state->cpuid, errstr);
	return (NULL);
}
Example #7
int 
ldap_pvt_thread_cond_wait( ldap_pvt_thread_cond_t *cond, 
	ldap_pvt_thread_mutex_t *mutex )
{
	return( cond_wait( cond, mutex ) );
}
Example #8
/* cond_global: use a global condition variable to process array's data */
void
cond_global(Workblk *array, struct scripttab *k)
{
	/* acquire the global condition lock */

#ifdef SOLARIS
	mutex_lock(&global_cond_lock);
#endif
#ifdef POSIX
	pthread_mutex_lock(&global_cond_lock);
#endif

	/* Check to see if the condition flag is true. If not, wait
	   for the condition flag to become true. */
	while (global_cond_flag != TRUE) {

#ifdef SOLARIS
		cond_wait(&global_cond, &global_cond_lock);
#endif
#ifdef POSIX
		pthread_cond_wait(&global_cond, &global_cond_lock);
#endif
	}
	/* Now, condition is true, and we have the global_cond_lock */

	/* set the condition flag to be FALSE, so when a new thread
	 * is created, it should wait till this one is done.
	 */
	global_cond_flag = FALSE;

	/* free the global_cond_lock and acquire the global lock */
#ifdef SOLARIS
	mutex_unlock(&global_cond_lock);
	mutex_lock(&global_lock);
#endif
#ifdef POSIX
	pthread_mutex_unlock(&global_cond_lock);
	pthread_mutex_lock(&global_lock);
#endif

	array->ready = gethrtime();
	array->vready = gethrvtime();

	array->compute_ready = array->ready;
	array->compute_vready = array->vready;

	/* do some work on the current array */
	(k->called_func)(&array->list[0]);

	array->compute_done = gethrtime();
	array->compute_vdone = gethrvtime();

	/* free the global lock */

#ifdef SOLARIS
	mutex_unlock(&global_lock);

	/* now set the condition, and signal any other threads */
	mutex_lock(&global_cond_lock);
#endif
#ifdef POSIX
	pthread_mutex_unlock(&global_lock);

	/* now set the condition, and signal any other threads */
	pthread_mutex_lock(&global_cond_lock);
#endif

	global_cond_flag = TRUE;
#ifdef SOLARIS
	cond_signal(&global_cond);
	mutex_unlock(&global_cond_lock);
#endif
#ifdef POSIX
	pthread_cond_signal(&global_cond);
	pthread_mutex_unlock(&global_cond_lock);
#endif
	/* make another call to preclude tail-call optimization on the unlock */
	(void) gethrtime();
}
Example #9
int
__nisdb_wlock_trylock(__nisdb_rwlock_t *rw, int trylock) {

	int		ret;
	pthread_t	myself = pthread_self();
	int		all_readers_blocked = 0;
	__nisdb_rl_t	*rr = 0;

	if (rw == 0) {
#ifdef	NISDB_MT_DEBUG
		/* This shouldn't happen */
		abort();
#endif	/* NISDB_MT_DEBUG */
		return (EFAULT);
	}

	if (rw->destroyed != 0)
		return (ESHUTDOWN);

	if ((ret = mutex_lock(&rw->mutex)) != 0)
		return (ret);

	if (rw->destroyed != 0) {
		(void) mutex_unlock(&rw->mutex);
		return (ESHUTDOWN);
	}

	/* Simplest (and probably most common) case: no readers or writers */
	if (rw->reader_count == 0 && rw->writer_count == 0) {
		rw->writer_count = 1;
		rw->writer.id = myself;
		rw->writer.count = 1;
		return (mutex_unlock(&rw->mutex));
	}

	/*
	 * Need to know if we're holding a read lock already, and if
	 * all other readers are blocked waiting for the mutex.
	 */
	if (rw->reader_count > 0) {
		if ((rr = find_reader(myself, rw)) != 0) {
			if (rr->count) {
				/*
				 * We're already holding a read lock, so
				 * if the number of readers equals the number
				 * of blocked readers plus one, all other
				 * readers are blocked.
				 */
				if (rw->reader_count ==
						(rw->reader_blocked + 1))
					all_readers_blocked = 1;
			} else {
				/*
				 * We're not holding a read lock, so the
				 * number of readers should equal the number
				 * of blocked readers if all readers are
				 * blocked.
				 */
				if (rw->reader_count == rw->reader_blocked)
					all_readers_blocked = 1;
			}
		}
	}

	/* Wait for reader(s) or writer to finish */
	while (1) {
		/*
		 * We can stop looping if one of the following holds:
		 *	- No readers, no writers
		 *	- No writers (or writer is myself), and one of:
		 *		- No readers
		 *		- One reader, and it's us
		 *		- N readers, but all blocked on the mutex
		 */
		if (
			(rw->writer_count == 0 && rw->reader_count == 0) ||
			((rw->writer_count == 0 || rw->writer.id == myself) &&
				((rw->reader_count == 0) ||
				(rw->reader_count == 1 &&
					rw->reader.id == myself)))) {
			break;
		}
		/*
		 * Provided that all readers are blocked on the mutex
		 * we break a potential dead-lock by acquiring the
		 * write lock.
		 */
		if (all_readers_blocked) {
			if (rw->writer_count == 0 || rw->writer.id == myself) {
				break;
			}
		}

		/*
		 * If 'trylock' is set, tell the caller that we'd have to
		 * block to obtain the lock.
		 */
		if (trylock) {
			(void) mutex_unlock(&rw->mutex);
			return (EBUSY);
		}

		/* If we're also a reader, indicate that we're blocking */
		if (rr != 0) {
			rr->wait = 1;
			rw->reader_blocked++;
		}
		if ((ret = cond_wait(&rw->cv, &rw->mutex)) != 0) {
			if (rr != 0) {
				rr->wait = 0;
				if (rw->reader_blocked > 0)
					rw->reader_blocked--;
#ifdef	NISDB_MT_DEBUG
				else
					abort();
#endif	/* NISDB_MT_DEBUG */
			}
			(void) mutex_unlock(&rw->mutex);
			return (ret);
		}
		if (rr != 0) {
			rr->wait = 0;
			if (rw->reader_blocked > 0)
				rw->reader_blocked--;
#ifdef	NISDB_MT_DEBUG
			else
				abort();
#endif	/* NISDB_MT_DEBUG */
		}
	}

	/* OK to grab the write lock */
	rw->writer.id = myself;
	/* Increment lock depth */
	rw->writer.count++;
	/* Set number of writers (doesn't increase with lock depth) */
	if (rw->writer_count == 0)
		rw->writer_count = 1;

	return (mutex_unlock(&rw->mutex));
}
Example #10
void o_conWait( o_con_t c, o_mtx_t m ) {
    cond_wait( c, m );
}
Example #11
void main(int argc, char ** argv)
{
  Circular_Buffer * cir_buffer;
  uint32 h_mem;
  sem_t s_procs_completed;

  int ct = 0;

  if (argc != 3) { 
    Printf("Usage: "); Printf(argv[0]); Printf(" <handle_to_shared_memory_page> <handle_to_page_mapped_semaphore>\n"); 
    Exit();
  }

  // Convert the command-line strings into integers for use as handles
  h_mem = dstrtol(argv[1], NULL, 10); // The "10" means base 10
  s_procs_completed = dstrtol(argv[2], NULL, 10);

  // Map shared memory page into this process's memory space
  if ((cir_buffer = (Circular_Buffer *)shmat(h_mem)) == NULL) {
    Printf("Could not map the virtual address to the memory in "); Printf(argv[0]); Printf(", exiting...\n");
    Exit();
  }


  while(ct < STRING_LENGTH){
    // Get the lock
    if(lock_acquire(cir_buffer->buffer_lock) != SYNC_SUCCESS){
      Printf("Get the lock failed !!!!!!!!!!!!!!!!!!\n");
      Exit();
    }

    /* Printf("Consumer %d holds the lock %d, head = %d, tail = %d\n", getpid(), cir_buffer->buffer_lock, cir_buffer->head, cir_buffer->tail); */

    //Printf("Consumer, before checking if buffer empty.\n");
    // Consume an item from the buffer
    while(cir_buffer->head == cir_buffer->tail){
      // conditional wait when empty
      if(cond_wait(cir_buffer->buffer_cond) != SYNC_SUCCESS)
	{
	  Printf("Consumer conditional wait not empty unsuccessful.\n");
	  Exit();
	}
    }
    
    // Remove the character
    Printf("Consumer %d removed: %c\n", getpid(), cir_buffer->space[cir_buffer->tail]);
    
    if (cir_buffer->nitem == 5)
      {
	if(cond_signal(cir_buffer->buffer_cond) != SYNC_SUCCESS)
	  {
	    Printf("Consumer conditioanl signal not full unsuccessful.\n");
	    Exit();
	  }
      }
    // Update tail and ct
    ct++;
    cir_buffer->tail = (cir_buffer->tail + 1)  % BUFFERSIZE;
    cir_buffer->nitem -= 1;

    // Release the lock
    if(lock_release(cir_buffer->buffer_lock) != SYNC_SUCCESS){
      Printf("Consumer %d release the lock %d failed !!!!!!!!!!!!!!!!!!\n", getpid(), cir_buffer->buffer_lock);
      Exit();
    }
  }



  // Signal the semaphore to tell the original process that we're done
  Printf("Consumer: PID %d is complete.\n", getpid());


  if(sem_signal(s_procs_completed) != SYNC_SUCCESS) {
    Printf("Bad semaphore s_procs_completed (%d) in ", s_procs_completed); Printf(argv[0]); Printf(", exiting...\n");
    Exit();
  }

  return;
}
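This consumer pairs with a producer that fills the same circular buffer; the producer is not part of the example. Under the same toy-OS API used above (lock_acquire/lock_release on buffer_lock, cond_wait/cond_signal on buffer_cond), its core step could look like this hypothetical sketch:

// Hypothetical producer counterpart; assumes the Circular_Buffer layout
// and synchronization API shown in the consumer above.
void produce_one(Circular_Buffer *cir_buffer, char c)
{
  // Get the lock
  if(lock_acquire(cir_buffer->buffer_lock) != SYNC_SUCCESS){
    Printf("Producer %d could not get the lock\n", getpid());
    Exit();
  }

  // Wait while the buffer is full (advancing head would collide with tail)
  while(((cir_buffer->head + 1) % BUFFERSIZE) == cir_buffer->tail){
    if(cond_wait(cir_buffer->buffer_cond) != SYNC_SUCCESS){
      Printf("Producer conditional wait not full unsuccessful.\n");
      Exit();
    }
  }

  // Insert the character and advance head
  cir_buffer->space[cir_buffer->head] = c;
  cir_buffer->head = (cir_buffer->head + 1) % BUFFERSIZE;
  cir_buffer->nitem += 1;

  // Wake a consumer that may be waiting on an empty buffer
  if(cond_signal(cir_buffer->buffer_cond) != SYNC_SUCCESS){
    Printf("Producer conditional signal unsuccessful.\n");
    Exit();
  }

  // Release the lock
  if(lock_release(cir_buffer->buffer_lock) != SYNC_SUCCESS){
    Printf("Producer %d release the lock failed\n", getpid());
    Exit();
  }
}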
Example #12
/*
 * export media from a library
 *
 * Programming note:  The parameter passed in was malloc'ed, be sure
 * to free it before thr_exit.
 */
void *
fifo_cmd_export_media(
	void *vcmd)
{
	sam_cmd_fifo_t *command = (sam_cmd_fifo_t *)vcmd;
	sam_defaults_t *defaults;
	export_request_t *request;
	dev_ent_t *device;

	/* equipment was verified in the caller */
	device = DEV_ENT(command->eq);

	defaults = GetDefaults();

	/* Must be a member of a family set or an entire robot */
	if (device->fseq) {
		message_request_t *message;

		/* The command is always sent to the robot */
		device = DEV_ENT(device->fseq);
		message =
		    (message_request_t *)SHM_REF_ADDR(device->dt.rb.message);
		request = &message->message.param.export_request;

		if (IS_ROBOT(device) &&
		    (device->status.b.ready && device->status.b.present)) {
			boolean_t issue = TRUE;

			(void) mutex_lock(&message->mutex);
			while (message->mtype != MESS_MT_VOID) {
				cond_wait(&message->cond_i, &message->mutex);
			}
			memset(&message->message, 0, sizeof (sam_message_t));
			message->message.magic = MESSAGE_MAGIC;
			message->message.command = MESS_CMD_EXPORT;
			message->message.exit_id = command->exit_id;
			request->flags = command->flags;
			request->eq = command->eq;
			request->flags &= ~EXPORT_FLAG_MASK;
			switch (command->cmd) {
			case CMD_FIFO_REMOVE_V:
				memmove(&request->vsn, &command->vsn, 32);
				set_media_to_default(
				    (media_t *)&command->media, defaults);
				request->media = command->media;
				request->slot = (uint_t)ROBOT_NO_SLOT;
				request->flags |= EXPORT_BY_VSN;
				break;

			case CMD_FIFO_REMOVE_S:
				request->slot = command->slot;
				request->flags |= EXPORT_BY_SLOT;
				break;

			case CMD_FIFO_REMOVE_E:
				request->eq = command->eq;
				request->slot = (uint_t)ROBOT_NO_SLOT;
				request->flags |= EXPORT_BY_EQ;
				break;

			default:
				sam_syslog(LOG_ERR,
				    "fifo_cmd_export_media: unknown switch"
				    "(%#x): %s:%s\n",
				    command->cmd, __FILE__, __LINE__);
				issue = FALSE;
				break;
			}

			if (issue) {
				message->mtype = MESS_MT_NORMAL;
			} else {
				message->mtype = MESS_MT_VOID;
				message->message.exit_id.pid = 0;
			}

			cond_signal(&message->cond_r);
			mutex_unlock(&message->mutex);
		}
	}
	free(command);			/* free the command buffer */
	thr_exit(NULL);
/* LINTED Function has no return statement */
}
Example #13
/*
 * tell scanner to start label process.  Runs as a thread.
 *
 * Programming note:  The parameter passed in was malloc'ed, be sure
 * to free it before thr_exit.
 */
void *
scanner_label(
	void *vcmd)
{
	sam_cmd_fifo_t *command = (sam_cmd_fifo_t *)vcmd;
	sam_defaults_t *defaults;
	dev_ent_t *device;
	message_request_t *message;

	/* equipment was verified in the caller */
	device = DEV_ENT(command->eq);
	message = (message_request_t *)SHM_REF_ADDR
	    (((shm_ptr_tbl_t *)master_shm.shared_memory)->scan_mess);
	defaults = GetDefaults();

	set_media_to_default((media_t *)& command->media, defaults);

	if (!(device->status.bits & DVST_SCANNING) &&
	    (device->status.bits & (DVST_READY | DVST_PRESENT))) {
		DTB((uchar_t *)command->vsn, sizeof (vsn_t));

		(void) mutex_lock(&message->mutex);
		while (message->mtype != MESS_MT_VOID)
			cond_wait(&message->cond_i, &message->mutex);

		(void) memset(&message->message, 0, sizeof (sam_message_t));
		message->message.magic = MESSAGE_MAGIC;
		message->message.exit_id = command->exit_id;
		message->message.command = MESS_CMD_LABEL;
		message->message.param.label_request.flags = command->flags;
		message->message.param.label_request.slot = command->eq;
		message->message.param.label_request.part = command->part;
		message->message.param.label_request.media = command->media;
		message->message.param.label_request.block_size =
		    command->block_size;

		memmove(&(message->message.param.label_request.vsn),
		    &(command->vsn), sizeof (vsn_t));
		memmove(&(message->message.param.label_request.old_vsn),
		    &(command->old_vsn), sizeof (vsn_t));
		memmove(&(message->message.param.label_request.info),
		    &(command->info), 127);
		message->mtype = MESS_MT_NORMAL;
		cond_signal(&message->cond_r);	/* wake up robot */
		mutex_unlock(&message->mutex);
	} else {
		char *msg1, *msg2;
		int		 len;

		msg1 = catgets(catfd, SET, 20009,
		    "Device not ready for labeling (eq %d)");
		/* allow some room for the equipment number to expand */
		msg2 = (char *)malloc_wait((len = (strlen(msg1) + 20)), 4, 0);
		memset(msg2, 0, len);
		sprintf(msg2, msg1, device->eq);
		write_client_exit_string(&command->exit_id, EXIT_FAILED, msg2);
		sam_syslog(LOG_WARNING, msg2);
		free(msg2);
	}

	free(command);			/* free the command buffer */
	thr_exit(NULL);
/* LINTED Function has no return statement */
}
Example #14
int crawl(char *start_url, int download_workers, int parse_workers, int queue_size,
    char *(*_fetch_fn)(char *url), void (*_edge_fn)(char *from, char *to)) {
    int i;

    bounded_buffer_t url_queue;
    unbounded_buffer_t page_queue;
    hashset_t url_set;
    bounded_buffer_init(&url_queue, queue_size);
    unbounded_buffer_init(&page_queue);
    hashset_init(&url_set, HASHSET_BUCKETS);

    bounded_buffer_put(&url_queue, (void *)str_duplicate(start_url));

    mutex_t done_mutex;
    cond_t done_cond;

    mutex_init(&done_mutex);
    cond_init(&done_cond);

    struct input_args in_args;
    in_args.url_queue = &url_queue;
    in_args.page_queue = &page_queue;
    in_args.url_set = &url_set;
    in_args.fetch = _fetch_fn;
    in_args.edge = _edge_fn;
    in_args.done_mutex = &done_mutex;
    in_args.done_cond = &done_cond;

    thread_t downloaders[download_workers];
    thread_t parsers[parse_workers];
    for (i = 0; i < download_workers; i++)
        thread_create(&downloaders[i], downloader, (void *)&in_args);
    for (i = 0; i < parse_workers; i++)
        thread_create(&parsers[i], parser, (void *)&in_args);

    while (1) {
        mutex_lock(&done_mutex);
        mutex_lock(&url_queue.mutex);
        mutex_lock(&url_queue.worker_mutex);
        mutex_lock(&page_queue.mutex);
        mutex_lock(&page_queue.worker_mutex);
        if (url_queue.count == 0 && url_queue.workers == 0 &&
            page_queue.count == 0 && page_queue.workers == 0) {
            url_queue.done = 1;
            page_queue.done = 1;
            cond_broadcast(&url_queue.empty);
            cond_broadcast(&url_queue.fill);
            cond_broadcast(&page_queue.fill);
            mutex_unlock(&url_queue.mutex);
            mutex_unlock(&url_queue.worker_mutex);
            mutex_unlock(&page_queue.mutex);
            mutex_unlock(&page_queue.worker_mutex);
            mutex_unlock(&done_mutex);
            break;
        } else {
            mutex_unlock(&url_queue.mutex);
            mutex_unlock(&url_queue.worker_mutex);
            mutex_unlock(&page_queue.mutex);
            mutex_unlock(&page_queue.worker_mutex);
            cond_wait(&done_cond, &done_mutex);
            mutex_unlock(&done_mutex);
        }
    }

    for (i = 0; i < download_workers; i++)
        thread_join(downloaders[i], NULL);
    for (i = 0; i < parse_workers; i++)
        thread_join(parsers[i], NULL);

    bounded_buffer_destroy(&url_queue);
    unbounded_buffer_destroy(&page_queue);
    hashset_destroy(&url_set);

    return 0;
}
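crawl() parks on done_cond and relies on the downloader and parser threads to wake it whenever a queue count or worker count changes; without that, the termination check above would never re-run. Exactly where the workers signal is not shown in this example, but the nudge itself would look roughly like this hypothetical helper, using the same mutex_t/cond_t wrappers:

/* Hypothetical helper a downloader/parser would call after updating a
 * queue or worker count, so crawl() re-evaluates its exit condition. */
static void notify_main_thread(mutex_t *done_mutex, cond_t *done_cond) {
    mutex_lock(done_mutex);
    cond_signal(done_cond);     /* wakes crawl(), which re-checks the queues */
    mutex_unlock(done_mutex);
}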
Example #15
int pthread_cond_timedwait(pthread_cond_t *cv, pthread_mutex_t *mut,
			   const struct timespec *to)
{
	return cond_wait(cv, mut, _ts_to_ms(to));
}
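The _ts_to_ms() helper used above is not shown. POSIX defines the timespec argument as an absolute CLOCK_REALTIME deadline, so one plausible implementation (a sketch only, not the project's actual helper) converts it to the relative millisecond timeout the native cond_wait() expects:

#include <stdint.h>
#include <time.h>

/* Sketch: absolute CLOCK_REALTIME deadline -> relative milliseconds,
 * clamped to zero if the deadline has already passed. */
static int32_t _ts_to_ms(const struct timespec *abstime)
{
	struct timespec now;
	int64_t ms;

	clock_gettime(CLOCK_REALTIME, &now);
	ms = (int64_t)(abstime->tv_sec - now.tv_sec) * 1000 +
	    (abstime->tv_nsec - now.tv_nsec) / 1000000;
	return (ms > 0) ? (int32_t)ms : 0;
}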
Example #16
int
__nisdb_rlock(__nisdb_rwlock_t *rw) {

	int		ret;
	pthread_t	myself = pthread_self();
	__nisdb_rl_t	*rr;

	if (rw == 0) {
#ifdef	NISDB_MT_DEBUG
		/* This shouldn't happen */
		abort();
#endif	/* NISDB_MT_DEBUG */
		return (EFAULT);
	}

	if (rw->destroyed != 0)
		return (ESHUTDOWN);

	if (rw->force_write)
		return (__nisdb_wlock(rw));

	if ((ret = mutex_lock(&rw->mutex)) != 0)
		return (ret);

	if (rw->destroyed != 0) {
		(void) mutex_unlock(&rw->mutex);
		return (ESHUTDOWN);
	}

	rr = find_reader(myself, rw);

	/* Wait for writer to complete; writer == myself also OK */
	while (rw->writer_count > 0 && rw->writer.id != myself) {
		if (rr != 0) {
			rr->wait = 1;
			rw->reader_blocked++;
		}
		if ((ret = cond_wait(&rw->cv, &rw->mutex)) != 0) {
			if (rr != 0) {
				rr->wait = 0;
				if (rw->reader_blocked > 0)
					rw->reader_blocked--;
#ifdef	NISDB_MT_DEBUG
				else
					abort();
#endif	/* NISDB_MT_DEBUG */
			}
			(void) mutex_unlock(&rw->mutex);
			return (ret);
		}
		if (rr != 0) {
			rr->wait = 0;
			if (rw->reader_blocked > 0)
				rw->reader_blocked--;
#ifdef	NISDB_MT_DEBUG
			else
				abort();
#endif	/* NISDB_MT_DEBUG */
		}
	}

	rr = increment_reader(myself, rw);
	ret = mutex_unlock(&rw->mutex);
	return ((rr == 0) ? ENOMEM : ret);
}
Example #17
int main(int argc, char **argv) {
  pid_t child_pid; /* child process id */
  pthread_t monitor_thread; /* monitor thread */
  char *args[CMD_ARGS_MAX+1], buffer_cmd[LINE_COMMAND_MAX]; /* cmd related */
  char buffer_log[LINE_LOGFILE_MAX]; /* log related */

  if((g_lst_children = lst_new()) == NULL) {
    if(fprintf(stderr, "lst_new: couldn't create list\n") < 0)
      handle_error("fprintf");
    exit(EXIT_FAILURE);
  }

  g_log_file = f_open(PATH_LOGFILE_STR, "a+");

  while(!feof(g_log_file)) {
    f_gets(buffer_log, LINE_LOGFILE_MAX, g_log_file); /* NULL or iteration # */
    if(feof(g_log_file)) break;
    if(sscanf(buffer_log, "%*s %d", &g_iterations) != 1)
      handle_error("sscanf");
    
    f_gets(buffer_log, LINE_LOGFILE_MAX, g_log_file); /* PID: # time: # s */
    f_gets(buffer_log, LINE_LOGFILE_MAX, g_log_file); /* total time: # s */
    if(sscanf(buffer_log, "%*[^0-9] %d", &g_total_time) != 1)
      handle_error("sscanf");
  }
  
  ++g_iterations;
  
  cond_init(&g_child_cv);
  cond_init(&g_monitoring_cv);
  mutex_init(&g_mutex);
  pcreate(&monitor_thread, process_monitor, NULL);
  
  while(true) {
    int numargs = readLineArguments(args,
      CMD_ARGS_MAX + 1, buffer_cmd, LINE_COMMAND_MAX);
    
    if(args[0] == NULL) continue;
    if(numargs < 0 || (numargs > 0 && !strcmp(args[0], COMMAND_EXIT_STR))) {
      
      mutex_lock(&g_mutex);
      g_monitoring = false;
      cond_signal(&g_monitoring_cv);
      mutex_unlock(&g_mutex);
      
      pjoin(monitor_thread);
      
      lst_print(g_lst_children);
      
      destroySharedResources();
      f_close(g_log_file);
      return EXIT_SUCCESS;
    }
    else {
      FILE *fp; /* To check file existence */
      if ((fp = fopen(args[0], "r")) == NULL)
        perror(args[0]);
      else {
        f_close(fp);
        
        mutex_lock(&g_mutex);
        while(g_num_children == MAXPAR)
          cond_wait(&g_child_cv, &g_mutex);
        mutex_unlock(&g_mutex);
        
        if((child_pid = fork()) < 0) perror("fork");
        else if(child_pid == 0) {
          execv(args[0],args);
          
          destroySharedResources();
          handle_error("execv");
        }
        else {
          mutex_lock(&g_mutex);
          if(insert_new_process(g_lst_children, child_pid, time(NULL)) != 0) {
            fprintf(stderr,
              "insert_new_process: failed to insert new process\n");
            exit(EXIT_FAILURE);
          }
          ++g_num_children;
          
          cond_signal(&g_monitoring_cv);
          mutex_unlock(&g_mutex);
        }
      }
    }
  }
}
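process_monitor() is not shown in this example. Judging from the waits in main(), its job is to reap terminated children and signal g_child_cv so that main() may fork again once a slot under MAXPAR frees up. A hypothetical sketch of that loop, reusing the globals and wrappers from the example (the real function presumably also records end times in g_lst_children and the log file):

/* Hypothetical sketch; requires <sys/wait.h> for wait(). */
void *process_monitor(void *arg) {
  pid_t pid;
  int status;
  (void)arg;

  for(;;) {
    mutex_lock(&g_mutex);
    /* Sleep until there is a child to reap, or until main() shuts us down. */
    while(g_monitoring && g_num_children == 0)
      cond_wait(&g_monitoring_cv, &g_mutex);
    if(!g_monitoring && g_num_children == 0) {
      mutex_unlock(&g_mutex);
      return NULL;
    }
    mutex_unlock(&g_mutex);

    if((pid = wait(&status)) > 0) {
      mutex_lock(&g_mutex);
      --g_num_children;
      cond_signal(&g_child_cv);   /* a slot under MAXPAR is free again */
      mutex_unlock(&g_mutex);
    }
  }
}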
Example #18
static void *worker_loop(Worker *t) {
    int i;
    enum worker_transaction_states state;

    lock();

    if (!heap_isempty(worker_ready_to_run)) {
        /* wait for older threads that are ready to run */
        worker_wake_up_next();
        heap_add(worker_ready_to_run, t);
        cond_wait(t->sleep);
    }

    /* let threads expire after a while so the thread pool can grow and shrink
     * as needed without hard limits on the number of threads */
    for (i = 0; i < THREAD_LIFETIME; i++) {
        if (i > 0) {
            /* wait in the pool for a request */
            worker_thread_pool = append_elt(worker_thread_pool, t);
            cond_wait(t->sleep);
        }
        worker_active++;

        /* initialize the exception handler and catch exceptions */
        do {
            state = setjmp(t->jmp);

            if (state == WORKER_BLOCKED) {
                /* wait for the blocking transaction to finish,
                 * then start over */
                worker_cleanup(t);
                worker_wake_up_next();
                cond_wait(t->sleep);
            } else if (state == WORKER_MULTISTEP) {
                /* wait for the next step in this grant/revoke op to wake us up
                 * with more work to do */
                if (DEBUG_VERBOSE)
                    printf("WORKER_MULTISTEP sleeping\n");
                worker_cleanup(t);
                t->func = NULL;
                t->arg = NULL;
                worker_wake_up_next();
                while (t->func == NULL)
                    cond_wait(t->sleep);
                if (DEBUG_VERBOSE)
                    printf("WORKER_MULTISTEP waking up\n");
            } else if (state == WORKER_RETRY) {
                worker_cleanup(t);
            }
        } while (state != WORKER_ZERO);

        /* process the request */
        if (t->func != NULL)
            t->func(t, t->arg);
        worker_cleanup(t);
        t->func = NULL;

        /* make anyone waiting on this transaction ready to run, then run one */
        if (!null(t->blocking)) {
            for ( ; !null(t->blocking); t->blocking = cdr(t->blocking))
                heap_add(worker_ready_to_run, car(t->blocking));
        }

        worker_active--;
        worker_wake_up_next();
    }

    unlock();

    return NULL;
}
Example #19
int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) {
	return cond_wait(cond, mutex);
}
Example #20
/* Wait on the condition */
int
__objc_condition_wait(objc_condition_t condition, objc_mutex_t mutex)
{
  return cond_wait((cond_t *)(&(condition->backend)),
		   (mutex_t *)(&(mutex->backend)));
}
Example #21
int HID_API_EXPORT hid_read_timeout(hid_device *dev, unsigned char *data, size_t length, int milliseconds)
{
	int bytes_read = -1;

	/* Lock the access to the report list. */
	pthread_mutex_lock(&dev->mutex);
	
	/* There's an input report queued up. Return it. */
	if (dev->input_reports) {
		/* Return the first one */
		bytes_read = return_data(dev, data, length);
		goto ret;
	}

	/* Return if the device has been disconnected. */
	if (dev->disconnected) {
		bytes_read = -1;
		goto ret;
	}
	
	if (dev->shutdown_thread) {
		/* This means the device has been closed (or there
		   has been an error). An error code of -1 should
		   be returned. */
		bytes_read = -1;
		goto ret;
	}

	/* There is no data. Go to sleep and wait for data. */
	
	if (milliseconds == -1) {
		/* Blocking */
		int res;
		res = cond_wait(dev, &dev->condition, &dev->mutex);
		if (res == 0)
			bytes_read = return_data(dev, data, length);
		else {
			/* There was an error, or a device disconnection. */
			bytes_read = -1;
		}
	}
	else if (milliseconds > 0) {
		/* Non-blocking, but called with timeout. */
		int res;
		struct timespec ts;
		struct timeval tv;
		gettimeofday(&tv, NULL);
		TIMEVAL_TO_TIMESPEC(&tv, &ts);
		ts.tv_sec += milliseconds / 1000;
		ts.tv_nsec += (milliseconds % 1000) * 1000000;
		if (ts.tv_nsec >= 1000000000L) {
			ts.tv_sec++;
			ts.tv_nsec -= 1000000000L;
		}
		
		res = cond_timedwait(dev, &dev->condition, &dev->mutex, &ts);
		if (res == 0)
			bytes_read = return_data(dev, data, length);
		else if (res == ETIMEDOUT)
			bytes_read = 0;
		else
			bytes_read = -1;
	}
	else {
		/* Purely non-blocking */
		bytes_read = 0;
	}

ret:
	/* Unlock */
	pthread_mutex_unlock(&dev->mutex);
	return bytes_read;
}
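From the caller's side, hid_read_timeout() is normally used on a handle returned by hid_open(). A short usage sketch against the standard hidapi entry points; the vendor/product IDs are placeholders and the header path varies by packaging:

#include <stdio.h>
#include <hidapi/hidapi.h>	/* sometimes installed as <hidapi.h> */

int read_one_report(void)
{
	unsigned char buf[64];
	int res;
	hid_device *dev;

	if (hid_init() != 0)
		return -1;

	/* 0x1234/0x5678 are placeholder vendor/product IDs. */
	dev = hid_open(0x1234, 0x5678, NULL);
	if (!dev) {
		hid_exit();
		return -1;
	}

	/* Block for at most 500 ms waiting for an input report. */
	res = hid_read_timeout(dev, buf, sizeof(buf), 500);
	if (res > 0)
		printf("got %d bytes\n", res);
	else if (res == 0)
		printf("timed out\n");
	else
		printf("read error or device disconnected\n");

	hid_close(dev);
	hid_exit();
	return res;
}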
Example #22
static void
/*ARGSUSED*/
lxt_server(void *cookie, char *argp, size_t request_size,
    door_desc_t *dp, uint_t n_desc)
{
	/*LINTED*/
	lxt_server_arg_t	*request = (lxt_server_arg_t *)argp;
	lxt_req_t		lxt_req;
	char			*door_path = cookie;

	/* Check if there's no callers left */
	if (argp == DOOR_UNREF_DATA) {
		(void) fdetach(door_path);
		(void) unlink(door_path);
		lx_debug("lxt_thunk_server: no clients, exiting");
		exit(0);
	}

	/* Sanity check the incoming request. */
	if (request_size < sizeof (*request)) {
		/* the lookup failed */
		lx_debug("lxt_thunk_server: invalid request size");
		(void) door_return(NULL, 0, NULL, 0);
		return;
	}

	if ((request->lxt_sa_op < LXT_SERVER_OP_MIN) ||
	    (request->lxt_sa_op > LXT_SERVER_OP_MAX)) {
		lx_debug("lxt_thunk_server: invalid request op");
		(void) door_return(NULL, 0, NULL, 0);
		return;
	}

	/* Handle ping requests immediately and return here. */
	if (request->lxt_sa_op == LXT_SERVER_OP_PING) {
		lx_debug("lxt_thunk_server: handling ping request");
		request->lxt_sa_success = 1;
		(void) door_return((char *)request, request_size, NULL, 0);
		return;
	}

	lx_debug("lxt_thunk_server: hand off request to Linux thread, "
	    "request = 0x%p", request);

	/* Pack the request up so we can pass it to a Linux thread. */
	lxt_req.lxtr_request = request;
	lxt_req.lxtr_request_size = request_size;
	lxt_req.lxtr_result = NULL;
	lxt_req.lxtr_result_size = 0;
	lxt_req.lxtr_complete = 0;
	(void) cond_init(&lxt_req.lxtr_complete_cv, USYNC_THREAD, NULL);

	/* Pass the request onto a Linux thread. */
	(void) mutex_lock(&lxt_req_lock);
	while (lxt_req_ptr != NULL)
		(void) cond_wait(&lxt_req_cv, &lxt_req_lock);
	lxt_req_ptr = &lxt_req;
	(void) cond_broadcast(&lxt_req_cv);

	/* Wait for the request to be completed. */
	while (lxt_req.lxtr_complete == 0)
		(void) cond_wait(&lxt_req.lxtr_complete_cv, &lxt_req_lock);
	assert(lxt_req_ptr != &lxt_req);
	(void) mutex_unlock(&lxt_req_lock);

	lx_debug("lxt_thunk_server: hand off request completed, "
	    "request = 0x%p", request);

	/*
	 * If door_return() is successful it never returns, so if we made
	 * it here there was some kind of error, but there's nothing we can
	 * really do about it.
	 */
	(void) door_return(
	    lxt_req.lxtr_result, lxt_req.lxtr_result_size, NULL, 0);
}
Example #23
/*
 *	view_media - view a database entry
 */
req_comp_t
view_media(
	library_t *library,
	char *vsn,
	void **ret_data)
{
	req_comp_t 	err;
	ibm_req_info_t 	*ibm_info;
	xport_state_t 	*transport;
	robo_event_t 	*view, *tmp;

	ibm_info = malloc_wait(sizeof (ibm_req_info_t), 2, 0);
	memset(ibm_info, 0, sizeof (ibm_req_info_t));
	ibm_info->sub_cmd = MT_QEVD;	/* view a single data base entry */
	sprintf((void *)&ibm_info->volser, "%-8.8s", vsn);

	/* Build transport thread request */
	view = malloc_wait(sizeof (robo_event_t), 5, 0);
	(void) memset(view, 0, sizeof (robo_event_t));
	view->request.internal.command = ROBOT_INTRL_VIEW_DATABASE;
	view->request.internal.address = (void *)ibm_info;

	if (DBG_LVL(SAM_DBG_TMOVE))
		sam_syslog(LOG_DEBUG, "view_media: %s.", vsn);

	view->type = EVENT_TYPE_INTERNAL;
	view->status.bits = REST_SIGNAL;
	view->completion = REQUEST_NOT_COMPLETE;

	transport = library->transports;

	mutex_lock(&view->mutex);
	mutex_lock(&transport->list_mutex);
	if (transport->active_count == 0)
		transport->first = view;
	else {
		LISTEND(transport, tmp);
		append_list(tmp, view);
	}
	transport->active_count++;
	cond_signal(&transport->list_condit);
	mutex_unlock(&transport->list_mutex);

	/* Wait for the transport to do the request */
	while (view->completion == REQUEST_NOT_COMPLETE)
		cond_wait(&view->condit, &view->mutex);
	mutex_unlock(&view->mutex);

	err = (req_comp_t)view->completion;
	if (DBG_LVL(SAM_DBG_TMOVE))
		sam_syslog(LOG_DEBUG, "Return from view (%#x).",
		    view->completion);

	if (!err) {
		*ret_data = malloc_wait(sizeof (IBM_query_info_t), 2, 0);
		memcpy(*ret_data, ibm_info->ret_data,
		    sizeof (IBM_query_info_t));
	} else
		*ret_data = NULL;

	free(ibm_info);
	mutex_destroy(&view->mutex);
	free(view);
	return (err);
}
Example #24
int
fscache_unmount(cfsd_fscache_object_t *fscache_object_p, int flag)
{
	int xx;
	int ret = 0;

	dbug_enter("fscache_unmount");
	dbug_precond(fscache_object_p);

	fscache_lock(fscache_object_p);

	/* if there is a thread running */
	if (fscache_object_p->i_threaded) {
		/* do not bother unmounting if rolling the log */
		if (fscache_object_p->i_reconcile) {
			ret = EBUSY;
			goto out;
		}

		/* inform the thread to try the unmount */
		fscache_object_p->i_tryunmount = 1;
		fscache_object_p->i_modify++;

		/* get the attention of the thread */
		dbug_print(("info", "about to do umount kill"));
		xx = thr_kill(fscache_object_p->i_threadid, SIGUSR1);
		if (xx) {
			dbug_print(("error", "thr_kill failed %d, threadid %d",
			    xx, fscache_object_p->i_threadid));
			ret = EIO;
			goto out;
		}

		/* wait for the thread to wake us up */
		while (fscache_object_p->i_tryunmount) {
			xx = cond_wait(&fscache_object_p->i_cvwait,
			    &fscache_object_p->i_lock);
			dbug_print(("info", "cond_wait woke up %d %d",
			    xx, fscache_object_p->i_tryunmount));
		}

		/* if the file system is still mounted */
		if (fscache_object_p->i_mounted)
			ret = EBUSY;
	}

	/* else if there is no thread running */
	else {
		/* try to unmount the file system */
		if (umount2(fscache_object_p->i_mntpt, flag) == -1) {
			xx = errno;
			dbug_print(("info", "unmount failed %s",
			    strerror(xx)));
			if (xx == EBUSY)
				ret = EBUSY;
			else if (xx == ENOTSUP)
				ret = ENOTSUP;
			else
				ret = EIO;
		} else {
			fscache_object_p->i_mounted = 0;
			fscache_object_p->i_modify++;
		}
	}
out:
	fscache_unlock(fscache_object_p);
	dbug_leave("fscache_unmount");
	return (ret);
}
Example #25
/*
 *	set_media_category - change the category of media.
 */
req_comp_t
set_media_category(
	library_t *library,
	char *volser,
	ushort_t src_cat,	/* source category */
	ushort_t targ_cat)
{
	req_comp_t	err;
	ibm_req_info_t 	*ibm_info;
	xport_state_t 	*transport;
	robo_event_t 	*set, *tmp;

	ibm_info = malloc_wait(sizeof (ibm_req_info_t), 2, 0);
	memset(ibm_info, 0, sizeof (ibm_req_info_t));
	ibm_info->targ_cat = targ_cat;
	ibm_info->src_cat = src_cat;
	sprintf((void *)&ibm_info->volser, "%-8.8s", volser);

	/* Build transport thread request */
	set = malloc_wait(sizeof (robo_event_t), 5, 0);
	(void) memset(set, 0, sizeof (robo_event_t));
	set->request.internal.command = ROBOT_INTRL_SET_CATEGORY;
	set->request.internal.address = (void *)ibm_info;

	if (DBG_LVL(SAM_DBG_TMOVE))
		sam_syslog(LOG_DEBUG,
		    "set_media_category: %s %#x->%#x.", volser, src_cat,
		    targ_cat);

	set->type = EVENT_TYPE_INTERNAL;
	set->status.bits = REST_SIGNAL;
	set->completion = REQUEST_NOT_COMPLETE;
	transport = library->transports;

	mutex_lock(&set->mutex);
	mutex_lock(&transport->list_mutex);
	if (transport->active_count == 0)
		transport->first = set;
	else {
		LISTEND(transport, tmp);
		append_list(tmp, set);
	}
	transport->active_count++;
	cond_signal(&transport->list_condit);
	mutex_unlock(&transport->list_mutex);

	/* Wait for the transport to do the request */
	while (set->completion == REQUEST_NOT_COMPLETE)
		cond_wait(&set->condit, &set->mutex);
	mutex_unlock(&set->mutex);

	err = (req_comp_t)set->completion;
	if (DBG_LVL(SAM_DBG_TMOVE))
		sam_syslog(LOG_DEBUG, "Return from set (%#x).",
		    set->completion);

	free(ibm_info);
	mutex_destroy(&set->mutex);
	free(set);
	return (err);
}
Example #26
static enum clnt_stat
clnt_vc_call(
	CLIENT *h,
	rpcproc_t proc,
	xdrproc_t xdr_args,
	const char *args_ptr,
	xdrproc_t xdr_results,
	caddr_t results_ptr,
	struct timeval timeout
)
{
	struct ct_data *ct;
	XDR *xdrs;
	struct rpc_msg reply_msg;
	u_int32_t x_id;
	u_int32_t *msg_x_id;
	bool_t shipnow;
	int refreshes = 2;
#ifdef _REENTRANT
	sigset_t mask, newmask;
#endif

	_DIAGASSERT(h != NULL);

	ct = (struct ct_data *) h->cl_private;

#ifdef _REENTRANT
	__clnt_sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (vc_fd_locks[ct->ct_fd])
		cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
	vc_fd_locks[ct->ct_fd] = __rpc_lock_value;
	mutex_unlock(&clnt_fd_lock);
#endif

	xdrs = &(ct->ct_xdrs);
	msg_x_id = &ct->ct_u.ct_mcalli;

	if (!ct->ct_waitset) {
		if (time_not_ok(&timeout) == FALSE)
			ct->ct_wait = timeout;
	}

	shipnow =
	    (xdr_results == NULL && timeout.tv_sec == 0
	    && timeout.tv_usec == 0) ? FALSE : TRUE;

call_again:
	xdrs->x_op = XDR_ENCODE;
	ct->ct_error.re_status = RPC_SUCCESS;
	x_id = ntohl(--(*msg_x_id));
	if ((! XDR_PUTBYTES(xdrs, ct->ct_u.ct_mcallc, ct->ct_mpos)) ||
	    (! XDR_PUTINT32(xdrs, (int32_t *)&proc)) ||
	    (! AUTH_MARSHALL(h->cl_auth, xdrs)) ||
	    (! (*xdr_args)(xdrs, __UNCONST(args_ptr)))) {
		if (ct->ct_error.re_status == RPC_SUCCESS)
			ct->ct_error.re_status = RPC_CANTENCODEARGS;
		(void)xdrrec_endofrecord(xdrs, TRUE);
		release_fd_lock(ct->ct_fd, mask);
		return (ct->ct_error.re_status);
	}
	if (! xdrrec_endofrecord(xdrs, shipnow)) {
		release_fd_lock(ct->ct_fd, mask);
		return (ct->ct_error.re_status = RPC_CANTSEND);
	}
	if (! shipnow) {
		release_fd_lock(ct->ct_fd, mask);
		return (RPC_SUCCESS);
	}
	/*
	 * Hack to provide rpc-based message passing
	 */
	if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
		release_fd_lock(ct->ct_fd, mask);
		return(ct->ct_error.re_status = RPC_TIMEDOUT);
	}


	/*
	 * Keep receiving until we get a valid transaction id
	 */
	xdrs->x_op = XDR_DECODE;
	for (;;) {
		reply_msg.acpted_rply.ar_verf = _null_auth;
		reply_msg.acpted_rply.ar_results.where = NULL;
		reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void;
		if (! xdrrec_skiprecord(xdrs)) {
			release_fd_lock(ct->ct_fd, mask);
			return (ct->ct_error.re_status);
		}
		/* now decode and validate the response header */
		if (! xdr_replymsg(xdrs, &reply_msg)) {
			if (ct->ct_error.re_status == RPC_SUCCESS)
				continue;
			release_fd_lock(ct->ct_fd, mask);
			return (ct->ct_error.re_status);
		}
		if (reply_msg.rm_xid == x_id)
			break;
	}

	/*
	 * process header
	 */
	_seterr_reply(&reply_msg, &(ct->ct_error));
	if (ct->ct_error.re_status == RPC_SUCCESS) {
		if (! AUTH_VALIDATE(h->cl_auth,
		    &reply_msg.acpted_rply.ar_verf)) {
			ct->ct_error.re_status = RPC_AUTHERROR;
			ct->ct_error.re_why = AUTH_INVALIDRESP;
		} else if (! (*xdr_results)(xdrs, results_ptr)) {
			if (ct->ct_error.re_status == RPC_SUCCESS)
				ct->ct_error.re_status = RPC_CANTDECODERES;
		}
		/* free verifier ... */
		if (reply_msg.acpted_rply.ar_verf.oa_base != NULL) {
			xdrs->x_op = XDR_FREE;
			(void)xdr_opaque_auth(xdrs,
			    &(reply_msg.acpted_rply.ar_verf));
		}
	}  /* end successful completion */
	else {
		/* maybe our credentials need to be refreshed ... */
		if (refreshes-- && AUTH_REFRESH(h->cl_auth))
			goto call_again;
	}  /* end of unsuccessful completion */
	release_fd_lock(ct->ct_fd, mask);
	return (ct->ct_error.re_status);
}