Example #1
// Initialize and start PSL thread
//
// The return value is encoded as a 16-bit value divided into 4 bits for
// each possible adapter.  The 4 bits for each adapter represent the 4
// possible AFUs on that adapter.  For example: afu0.0 is 0x8000 and
// afu3.0 is 0x0008.
uint16_t psl_init(struct psl **head, struct parms *parms, char *id, char *host,
		  int port, pthread_mutex_t * lock, FILE * dbg_fp)
{
	struct psl *psl;
	struct job_event *reset;
	uint16_t location;

	location = 0x8000;
	if ((psl = (struct psl *)calloc(1, sizeof(struct psl))) == NULL) {
		perror("malloc");
		error_msg("Unable to allocation memory for psl");
		goto init_fail;
	}
	psl->timeout = parms->timeout;
	if ((strlen(id) != 6) || strncmp(id, "afu", 3) || (id[4] != '.')) {
		warn_msg("Invalid afu name: %s", id);
		goto init_fail;
	}
	if ((id[3] < '0') || (id[3] > '3')) {
		warn_msg("Invalid afu major: %c", id[3]);
		goto init_fail;
	}
	if ((id[5] < '0') || (id[5] > '3')) {
		warn_msg("Invalid afu minor: %c", id[5]);
		goto init_fail;
	}
	psl->dbg_fp = dbg_fp;
	psl->major = id[3] - '0';
	psl->minor = id[5] - '0';
	psl->dbg_id = psl->major << 4;
	psl->dbg_id |= psl->minor;
	location >>= (4 * psl->major);
	location >>= psl->minor;
	if ((psl->name = (char *)malloc(strlen(id) + 1)) == NULL) {
		perror("malloc");
		error_msg("Unable to allocation memory for psl->name");
		goto init_fail;
	}
	strcpy(psl->name, id);
	if ((psl->host = (char *)malloc(strlen(host) + 1)) == NULL) {
		perror("malloc");
		error_msg("Unable to allocation memory for psl->host");
		goto init_fail;
	}
	strcpy(psl->host, host);
	psl->port = port;
	psl->client = NULL;
	psl->idle_cycles = PSL_IDLE_CYCLES;
	psl->lock = lock;

	// Connect to AFU
	psl->afu_event = (struct AFU_EVENT *)malloc(sizeof(struct AFU_EVENT));
	if (psl->afu_event == NULL) {
		perror("malloc");
		goto init_fail;
	}
	info_msg("Attempting to connect AFU: %s @ %s:%d", psl->name,
		 psl->host, psl->port);
	if (psl_init_afu_event(psl->afu_event, psl->host, psl->port) !=
	    PSL_SUCCESS) {
		warn_msg("Unable to connect AFU: %s @ %s:%d", psl->name,
			 psl->host, psl->port);
		goto init_fail;
	}
	// DEBUG
	debug_afu_connect(psl->dbg_fp, psl->dbg_id);

	// Initialize job handler
	if ((psl->job = job_init(psl->afu_event, &(psl->state), psl->name,
				 psl->dbg_fp, psl->dbg_id)) == NULL) {
		perror("job_init");
		goto init_fail;
	}
	// Initialize mmio handler
	if ((psl->mmio = mmio_init(psl->afu_event, psl->timeout, psl->name,
				   psl->dbg_fp, psl->dbg_id)) == NULL) {
		perror("mmio_init");
		goto init_fail;
	}
	// Initialize cmd handler
	if ((psl->cmd = cmd_init(psl->afu_event, parms, psl->mmio,
				 &(psl->state), psl->name, psl->dbg_fp,
				 psl->dbg_id))
	    == NULL) {
		perror("cmd_init");
		goto init_fail;
	}
	// Set credits for AFU
	if (psl_aux1_change(psl->afu_event, psl->cmd->credits) != PSL_SUCCESS) {
		warn_msg("Unable to set credits");
		goto init_fail;
	}
	// Start psl loop thread
	if (pthread_create(&(psl->thread), NULL, _psl_loop, psl)) {
		perror("pthread_create");
		goto init_fail;
	}
	// Add psl to list
	while ((*head != NULL) && ((*head)->major < psl->major)) {
		head = &((*head)->_next);
	}
	while ((*head != NULL) && ((*head)->major == psl->major) &&
	       ((*head)->minor < psl->minor)) {
		head = &((*head)->_next);
	}
	psl->_next = *head;
	if (psl->_next != NULL)
		psl->_next->_prev = psl;
	*head = psl;

	// Send reset to AFU
	reset = add_job(psl->job, PSL_JOB_RESET, 0L);
	while (psl->job->job == reset) {	/*infinite loop */
		lock_delay(psl->lock);
	}

	// Read AFU descriptor
	psl->state = PSLSE_DESC;
	read_descriptor(psl->mmio, psl->lock);

	// Finish PSL configuration
	psl->state = PSLSE_IDLE;
	if (dedicated_mode_support(psl->mmio)) {
		// AFU supports Dedicated Mode
		psl->max_clients = 1;
	}
	if (directed_mode_support(psl->mmio)) {
		// AFU supports Directed Mode
		psl->max_clients = psl->mmio->desc.num_of_processes;
	}
	if (psl->max_clients == 0) {
		error_msg("AFU programming model is invalid");
		goto init_fail;
	}
	psl->client = (struct client **)calloc(psl->max_clients,
					       sizeof(struct client *));
	psl->cmd->client = psl->client;
	psl->cmd->max_clients = psl->max_clients;

	return location;

 init_fail:
	if (psl) {
		if (psl->afu_event) {
			psl_close_afu_event(psl->afu_event);
			free(psl->afu_event);
		}
		if (psl->host)
			free(psl->host);
		if (psl->name)
			free(psl->name);
		free(psl);
	}
	pthread_mutex_unlock(lock);
	return 0;
}
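
The location value returned above can be reproduced with two shifts, exactly as psl_init computes it.  A minimal sketch (afu_location and the main driver are hypothetical helpers for illustration, not part of PSLSE):

#include <stdint.h>
#include <stdio.h>

/* Compute the 16-bit location bit for afu<major>.<minor>: bit 15 is
 * afu0.0, each adapter owns one nibble and each AFU one bit within it. */
static uint16_t afu_location(int major, int minor)
{
	uint16_t location = 0x8000;

	location >>= (4 * major);	/* step to the adapter's nibble */
	location >>= minor;		/* step to the AFU's bit */
	return location;
}

int main(void)
{
	printf("afu0.0 -> 0x%04x\n", afu_location(0, 0));	/* 0x8000 */
	printf("afu3.0 -> 0x%04x\n", afu_location(3, 0));	/* 0x0008 */
	printf("afu1.2 -> 0x%04x\n", afu_location(1, 2));	/* 0x0200 */
	return 0;
}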
Example #2
void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid;
	struct lock_delay_arg lda;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif

	tid = (uintptr_t)curthread;

	if (SCHEDULER_STOPPED()) {
		/*
		 * Ensure that spinlock sections are balanced even when the
		 * scheduler is stopped, since we may otherwise inadvertently
		 * re-enable interrupts while dumping core.
		 */
		spinlock_enter();
		return;
	}

	lock_delay_arg_init(&lda, &mtx_spin_delay);

#ifdef KDTRACE_HOOKS
	spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
#endif
	for (;;) {
retry:
		spinlock_enter();
		m = td->td_lock;
		KASSERT(m->mtx_lock != MTX_DESTROYED,
		    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
		KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
		    ("thread_lock() of sleep mutex %s @ %s:%d",
		    m->lock_object.lo_name, file, line));
		if (mtx_owned(m))
			KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
			    m->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&m->lock_object,
		    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
		for (;;) {
			if (m->mtx_lock == MTX_UNOWNED && _mtx_obtain_lock(m, tid))
				break;
			if (m->mtx_lock == tid) {
				m->mtx_recurse++;
				break;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&m->lock_object,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			while (m->mtx_lock != MTX_UNOWNED) {
				if (lda.spin_cnt < 10000000) {
					lock_delay(&lda);
				} else {
					lda.spin_cnt++;
					if (lda.spin_cnt < 60000000 ||
					    kdb_active || panicstr != NULL)
						DELAY(1);
					else
						_mtx_lock_spin_failed(m);
					cpu_spinwait();
				}
				if (m != td->td_lock)
					goto retry;
			}
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		__mtx_unlock_spin(m);	/* does spinlock_exit() */
	}
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif
	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
		    contested, waittime, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef KDTRACE_HOOKS
	if (spin_time != 0)
		LOCKSTAT_RECORD1(thread__spin, m, spin_time);
#endif
}
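
The inner loop above is a two-phase spin: attempt the atomic acquire only when the lock word looks free, and otherwise poll with plain reads while backing off through lock_delay() so waiters do not hammer the owner's cache line.  A user-space sketch of the same pattern with C11 atomics (all names here are illustrative, not FreeBSD API):

#include <sched.h>
#include <stdatomic.h>
#include <stdint.h>

#define UNOWNED	((uintptr_t)0)

struct spinlock {
	atomic_uintptr_t word;
};

static void spin_acquire(struct spinlock *s, uintptr_t tid)
{
	unsigned int spins = 0;

	for (;;) {
		uintptr_t v = UNOWNED;

		/* Attempt the atomic only when the lock looks free. */
		if (atomic_compare_exchange_weak(&s->word, &v, tid))
			return;
		/* Otherwise poll with relaxed reads, yielding as backoff. */
		while (atomic_load_explicit(&s->word,
		    memory_order_relaxed) != UNOWNED) {
			if (++spins > 1000)
				sched_yield();	/* crude stand-in for lock_delay() */
		}
	}
}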
Example #3
// PSL thread loop
static void *_psl_loop(void *ptr)
{
	struct psl *psl = (struct psl *)ptr;
	struct cmd_event *event, *temp;
	int events, i, stopped, reset;
	uint8_t ack = PSLSE_DETACH;

	stopped = 1;
	pthread_mutex_lock(psl->lock);
	while (psl->state != PSLSE_DONE) {
		// idle_cycles keeps the clock running for some time after
		// the AFU has gone idle.  Eventually clocks are no longer
		// presented to an idle AFU, to keep simulation waveforms
		// from growing huge with inactive cycles.
		if (psl->state != PSLSE_IDLE) {
			psl->idle_cycles = PSL_IDLE_CYCLES;
			if (stopped)
				info_msg("Clocking %s", psl->name);
			fflush(stdout);
			stopped = 0;
		}

		if (psl->idle_cycles) {
			// Clock AFU
			psl_signal_afu_model(psl->afu_event);
			// Check for events from AFU
			events = psl_get_afu_events(psl->afu_event);

			// Error on socket
			if (events < 0) {
				warn_msg("Lost connection with AFU");
				break;
			}
			// Handle events from AFU
			if (events > 0)
				_handle_afu(psl);

			// Drive events to AFU
			send_job(psl->job);
			send_mmio(psl->mmio);

			if (psl->mmio->list == NULL)
				psl->idle_cycles--;
		} else {
			if (!stopped)
				info_msg("Stopping clocks to %s", psl->name);
			stopped = 1;
			lock_delay(psl->lock);
		}

		// Skip client section if AFU descriptor hasn't been read yet
		if (psl->client == NULL) {
			lock_delay(psl->lock);
			continue;
		}
		// Check for event from application
		reset = 0;
		for (i = 0; i < psl->max_clients; i++) {
			if (psl->client[i] == NULL)
				continue;
			if ((psl->client[i]->state == CLIENT_NONE) &&
			    (psl->client[i]->idle_cycles == 0)) {
				put_bytes(psl->client[i]->fd, 1, &ack,
					  psl->dbg_fp, psl->dbg_id,
					  psl->client[i]->context);
				_free(psl, psl->client[i]);
				psl->client[i] = NULL;
				reset = 1;
				continue;
			}
			if (psl->state == PSLSE_RESET)
				continue;
			_handle_client(psl, psl->client[i]);
			if (psl->client[i]->idle_cycles) {
				psl->client[i]->idle_cycles--;
			}
			if (client_cmd(psl->cmd, psl->client[i])) {
				psl->client[i]->idle_cycles = PSL_IDLE_CYCLES;
			}
		}

		// Send reset to AFU
		if (reset == 1) {
			psl->cmd->buffer_read = NULL;
			event = psl->cmd->list;
			while (event != NULL) {
				if (reset) {
					warn_msg
					    ("Client dropped context before AFU completed");
					reset = 0;
				}
				warn_msg("Dumping command tag=0x%02x",
					 event->tag);
				if (event->data) {
					free(event->data);
				}
				if (event->parity) {
					free(event->parity);
				}
				temp = event;
				event = event->_next;
				free(temp);
			}
			psl->cmd->list = NULL;
			info_msg("Sending reset to AFU");
			add_job(psl->job, PSL_JOB_RESET, 0L);
		}

		lock_delay(psl->lock);
	}

	// Disconnect clients
	for (i = 0; i < psl->max_clients; i++) {
		if ((psl->client != NULL) && (psl->client[i] != NULL)) {
			// FIXME: Send warning to clients first?
			info_msg("Disconnecting %s context %d", psl->name,
				 psl->client[i]->context);
			close_socket(&(psl->client[i]->fd));
		}
	}

	// DEBUG
	debug_afu_drop(psl->dbg_fp, psl->dbg_id);

	// Disconnect from simulator, free memory and shut down thread
	info_msg("Disconnecting %s @ %s:%d", psl->name, psl->host, psl->port);
	if (psl->client)
		free(psl->client);
	if (psl->_prev)
		psl->_prev->_next = psl->_next;
	if (psl->_next)
		psl->_next->_prev = psl->_prev;
	if (psl->cmd) {
		free(psl->cmd);
	}
	if (psl->job) {
		free(psl->job);
	}
	if (psl->mmio) {
		free(psl->mmio);
	}
	if (psl->host)
		free(psl->host);
	if (psl->afu_event) {
		psl_close_afu_event(psl->afu_event);
		free(psl->afu_event);
	}
	if (psl->name)
		free(psl->name);
	if (*(psl->head) == psl)
		*(psl->head) = psl->_next;
	pthread_mutex_unlock(psl->lock);
	free(psl);
	pthread_exit(NULL);
}
Example #4
/*
 * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
    const char *file, int line)
{
	struct mtx *m;
	struct turnstile *ts;
	uintptr_t v;
#ifdef ADAPTIVE_MUTEXES
	volatile struct thread *owner;
#endif
#ifdef KTR
	int cont_logged = 0;
#endif
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#if defined(ADAPTIVE_MUTEXES) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

#if defined(ADAPTIVE_MUTEXES)
	lock_delay_arg_init(&lda, &mtx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif
	m = mtxlock2mtx(c);

	if (mtx_owned(m)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
		opts &= ~MTX_RECURSE;
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}
	opts &= ~MTX_RECURSE;

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object,
		    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&m->lock_object);
#endif

	for (;;) {
		if (m->mtx_lock == MTX_UNOWNED && _mtx_obtain_lock(m, tid))
			break;
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the owner is running on another CPU, spin until the
		 * owner stops running or the state of the lock changes.
		 */
		v = m->mtx_lock;
		if (v != MTX_UNOWNED) {
			owner = (struct thread *)(v & ~MTX_FLAGMASK);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&m->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, m, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname((struct thread *)tid),
				    "spinning", "lockname:\"%s\"",
				    m->lock_object.lo_name);
				while (mtx_owner(m) == owner &&
				    TD_IS_RUNNING(owner))
					lock_delay(&lda);
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname((struct thread *)tid),
				    "running");
				continue;
			}
		}
#endif

		ts = turnstile_trywait(&m->lock_object);
		v = m->mtx_lock;

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_MUTEXES
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		owner = (struct thread *)(v & ~MTX_FLAGMASK);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			continue;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
			turnstile_cancel(ts);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    (void *)tid, file, line, m->lock_object.lo_name,
			    WITNESS_FILE(&m->lock_object),
			    WITNESS_LINE(&m->lock_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&m->lock_object);
#endif
		turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&m->lock_object);
		sleep_cnt++;
#endif
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&m->lock_object);
#endif
#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->lock_object.lo_name, (void *)tid, file, line);
	}
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested,
	    waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (sleep_time)
		LOCKSTAT_RECORD1(adaptive__block, m, sleep_time);

	/*
	 * Only record the loops spinning and not sleeping. 
	 */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time);
#endif
}
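
The KDTRACE_HOOKS blocks above use a compact elapsed-time idiom: subtract the clock before the timed region and add it after, so a single variable ends up holding the delta (sleep_time and all_time are both kept this way).  A user-space sketch with clock_gettime() standing in for lockstat_nsecs() (hypothetical, for illustration only):

#include <stdint.h>
#include <time.h>

static int64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ((int64_t)ts.tv_sec * 1000000000LL) + ts.tv_nsec;
}

/*
 * Usage:
 *	int64_t sleep_time = 0;
 *	sleep_time -= now_ns();
 *	... blocking or spinning section ...
 *	sleep_time += now_ns();	// now holds the elapsed nanoseconds
 */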
Example #5
/*
 * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
    const char *file, int line)
{
	struct mtx *m;
	struct lock_delay_arg lda;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	lock_delay_arg_init(&lda, &mtx_spin_delay);
	m = mtxlock2mtx(c);

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "spinning", "lockname:\"%s\"", m->lock_object.lo_name);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
#ifdef KDTRACE_HOOKS
	spin_time -= lockstat_nsecs(&m->lock_object);
#endif
	for (;;) {
		if (m->mtx_lock == MTX_UNOWNED && _mtx_obtain_lock(m, tid))
			break;
		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (lda.spin_cnt < 10000000) {
				lock_delay(&lda);
				continue;
			}
			lda.spin_cnt++;
			if (lda.spin_cnt < 60000000 || kdb_active ||
			    panicstr != NULL)
				DELAY(1);
			else
				_mtx_lock_spin_failed(m);
			cpu_spinwait();
		}
		spinlock_enter();
	}
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
	KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "running");

#ifdef KDTRACE_HOOKS
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
	    contested, waittime, file, line);
	if (spin_time != 0)
		LOCKSTAT_RECORD1(spin__spin, m, spin_time);
#endif
}
Example #6
void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid, v;
	struct lock_delay_arg lda;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof = 1;
#endif

	tid = (uintptr_t)curthread;

	if (SCHEDULER_STOPPED()) {
		/*
		 * Ensure that spinlock sections are balanced even when the
		 * scheduler is stopped, since we may otherwise inadvertently
		 * re-enable interrupts while dumping core.
		 */
		spinlock_enter();
		return;
	}

	lock_delay_arg_init(&lda, &mtx_spin_delay);

#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#elif defined(KDTRACE_HOOKS)
	doing_lockprof = lockstat_enabled;
	if (__predict_false(doing_lockprof))
		spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
#endif
	for (;;) {
retry:
		v = MTX_UNOWNED;
		spinlock_enter();
		m = td->td_lock;
		thread_lock_validate(m, opts, file, line);
		for (;;) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			if (v == MTX_UNOWNED)
				continue;
			if (v == tid) {
				m->mtx_recurse++;
				break;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&m->lock_object,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			do {
				if (lda.spin_cnt < 10000000) {
					lock_delay(&lda);
				} else {
					lda.spin_cnt++;
					if (lda.spin_cnt < 60000000 ||
					    kdb_active || panicstr != NULL)
						DELAY(1);
					else
						_mtx_lock_spin_failed(m);
					cpu_spinwait();
				}
				if (m != td->td_lock)
					goto retry;
				v = MTX_READ_VALUE(m);
			} while (v != MTX_UNOWNED);
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		__mtx_unlock_spin(m);	/* does spinlock_exit() */
	}
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);

#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif
	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
		    contested, waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (lda.spin_cnt != 0)
		LOCKSTAT_RECORD1(thread__spin, m, spin_time);
#endif
}
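
This revision gates all of the lockstat bookkeeping behind doing_lockprof so the common path skips it entirely; __predict_true and __predict_false are branch-prediction hints built on GCC's __builtin_expect.  A short sketch of the same hint pattern outside the kernel (the macro names follow the usual convention and are defined here for illustration):

#include <stdio.h>

#define predict_true(x)		__builtin_expect(!!(x), 1)
#define predict_false(x)	__builtin_expect(!!(x), 0)

int process(int rare_error)
{
	if (predict_false(rare_error)) {	/* cold path */
		fprintf(stderr, "error path\n");
		return -1;
	}
	return 0;				/* hot path */
}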
Example #7
#if LOCK_DEBUG > 0
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts,
    const char *file, int line)
#else
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct mtx *m;
	struct lock_delay_arg lda;
	uintptr_t tid;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof;
#endif

	tid = (uintptr_t)curthread;
	m = mtxlock2mtx(c);

	if (__predict_false(v == MTX_UNOWNED))
		v = MTX_READ_VALUE(m);

	if (__predict_false(v == tid)) {
		m->mtx_recurse++;
		return;
	}

	if (SCHEDULER_STOPPED())
		return;

	lock_delay_arg_init(&lda, &mtx_spin_delay);

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "spinning", "lockname:\"%s\"", m->lock_object.lo_name);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#elif defined(KDTRACE_HOOKS)
	doing_lockprof = lockstat_enabled;
	if (__predict_false(doing_lockprof))
		spin_time -= lockstat_nsecs(&m->lock_object);
#endif
	for (;;) {
		if (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			continue;
		}
		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		do {
			if (lda.spin_cnt < 10000000) {
				lock_delay(&lda);
			} else {
				lda.spin_cnt++;
				if (lda.spin_cnt < 60000000 || kdb_active ||
				    panicstr != NULL)
					DELAY(1);
				else
					_mtx_lock_spin_failed(m);
				cpu_spinwait();
			}
			v = MTX_READ_VALUE(m);
		} while (v != MTX_UNOWNED);
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
	KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "running");

#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
	    contested, waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (lda.spin_cnt != 0)
		LOCKSTAT_RECORD1(spin__spin, m, spin_time);
#endif
}
Example #8
int main(int argc, char **argv)
{
    struct sockaddr_in client_addr;
    struct client *client;
    struct client **client_ptr;
    int listen_fd, connect_fd;
    socklen_t client_len;
    sigset_t set;
    struct sigaction action;
    char *shim_host_path;
    char *parms_path;
    char *debug_log_path;
    struct parms *parms;
    char *ip;

    // Open debug.log file
    debug_log_path = getenv("DEBUG_LOG_PATH");
    if (!debug_log_path) debug_log_path = "debug.log";
    fp = fopen(debug_log_path, "w");
    if (!fp) {
        error_msg("Could not open debug.log");
        return -1;
    }

    // Mask SIGPIPE signal for all threads
    sigemptyset(&set);
    sigaddset(&set, SIGPIPE);
    if (pthread_sigmask(SIG_BLOCK, &set, NULL)) {
        perror("pthread_sigmask");
        return -1;
    }
    // Catch SIGINT for graceful termination
    action.sa_handler = _INThandler;
    sigemptyset(&(action.sa_mask));
    action.sa_flags = 0;
    sigaction(SIGINT, &action, NULL);

    // Report version
    info_msg("PSLSE version %d.%03d compiled @ %s %s", PSLSE_VERSION_MAJOR,
             PSLSE_VERSION_MINOR, __DATE__, __TIME__);
    debug_send_version(fp, PSLSE_VERSION_MAJOR, PSLSE_VERSION_MINOR);

    // Parse parameters file
    parms_path = getenv("PSLSE_PARMS");
    if (!parms_path) parms_path = "pslse.parms";
    parms = parse_parms(parms_path, fp);
    if (parms == NULL) {
        error_msg("Unable to parse params file \"%s\"", parms_path);
        return -1;
    }
    timeout = parms->timeout;

    // Connect to simulator(s) and start psl thread(s)
    pthread_mutex_init(&lock, NULL);
    pthread_mutex_lock(&lock);
    shim_host_path = getenv("SHIM_HOST_DAT");
    if (!shim_host_path) shim_host_path = "shim_host.dat";
    afu_map = parse_host_data(&psl_list, parms, shim_host_path, &lock, fp);
    if (psl_list == NULL) {
        free(parms);
        fclose(fp);
        warn_msg("Unable to connect to any simulators");
        return -1;
    }
    // Start server
    if ((listen_fd = _start_server()) < 0) {
        free(parms);
        fclose(fp);
        return -1;
    }
    // Watch for client connections
    while (psl_list != NULL) {
        // Wait for next client to connect
        client_len = sizeof(client_addr);
        pthread_mutex_unlock(&lock);
        connect_fd = accept(listen_fd, (struct sockaddr *)&client_addr,
                            &client_len);
        pthread_mutex_lock(&lock);
        if (connect_fd < 0) {
            lock_delay(&lock);
            continue;
        }
        ip = (char *)malloc(INET_ADDRSTRLEN + 1);
        inet_ntop(AF_INET, &(client_addr.sin_addr.s_addr), ip,
                  INET_ADDRSTRLEN);
        // Clean up disconnected clients
        client_ptr = &client_list;
        while (*client_ptr != NULL) {
            client = *client_ptr;
            if ((client->pending == 0)
                    && (client->state == CLIENT_NONE)) {
                *client_ptr = client->_next;
                if (client->_next != NULL)
                    client->_next->_prev = client->_prev;
                _free_client(client);
                lock_delay(&lock);
                continue;
            }
            client_ptr = &((*client_ptr)->_next);
        }
        // Add new client
        info_msg("Connection from %s", ip);
        client = _client_connect(&connect_fd, ip);
        if (client != NULL) {
            if (client_list != NULL)
                client_list->_prev = client;
            client->_next = client_list;
            client_list = client;
            if (pthread_create(&(client->thread), NULL,
                               _client_loop, client)) {
                perror("pthread_create");
                break;
            }
        }
        lock_delay(&lock);
    }
    info_msg("No AFUs connected, Shutting down PSLSE\n");
    close_socket(&listen_fd);

    // Shutdown unassociated client connections
    while (client_list != NULL) {
        client = client_list;
        client_list = client->_next;
        if (client->pending)
            client->pending = 0;
        pthread_mutex_unlock(&lock);
        pthread_join(client->thread, NULL);
        pthread_mutex_lock(&lock);
        close_socket(&(client->fd));
        _free_client(client);
    }
    pthread_mutex_unlock(&lock);

    free(parms);
    fclose(fp);
    pthread_mutex_destroy(&lock);

    return 0;
}
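
Note how main() releases the global lock around the blocking accept() call and re-takes it before touching the shared client list, which lets the PSL and client threads waiting in lock_delay() on the same mutex make progress in the meantime.  A minimal sketch of that pattern (lock and wait_for_client are placeholders, not the PSLSE globals):

#include <pthread.h>
#include <sys/socket.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Block in accept() without holding the lock, then re-acquire it
 * before the caller touches any shared state. */
static int wait_for_client(int listen_fd)
{
    int fd;

    pthread_mutex_unlock(&lock);
    fd = accept(listen_fd, NULL, NULL);
    pthread_mutex_lock(&lock);
    return fd;
}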
Example #9
static void *_client_loop(void *ptr)
{
    struct client *client = (struct client *)ptr;
    uint8_t data[2];
    int rc;

    pthread_mutex_lock(&lock);
    while (client->pending) {
        rc = bytes_ready(client->fd, client->timeout, &(client->abort));
        if (rc == 0) {
            lock_delay(&lock);
            continue;
        }
        if ((rc < 0) || get_bytes(client->fd, 1, data, 10,
                                  &(client->abort), fp, -1, -1) < 0) {
            client_drop(client, PSL_IDLE_CYCLES, CLIENT_NONE);
            break;
        }
        if (data[0] == PSLSE_QUERY) {
            if (get_bytes_silent(client->fd, 1, data, timeout,
                                 &(client->abort)) < 0) {
                debug_msg("_client_loop failed PSLSE_QUERY");
                client_drop(client, PSL_IDLE_CYCLES,
                            CLIENT_NONE);
                break;
            }
            _query(client, data[0]);
            lock_delay(&lock);
            continue;
        }
        if (data[0] == PSLSE_MAX_INT) {
            if (get_bytes(client->fd, 2, data, timeout,
                          &(client->abort), fp, -1, -1) < 0) {
                client_drop(client, PSL_IDLE_CYCLES,
                            CLIENT_NONE);
                break;
            }
            _max_irqs(client, data[0]);
            lock_delay(&lock);
            continue;
        }
        if (data[0] == PSLSE_OPEN) {
            if (get_bytes_silent(client->fd, 2, data, timeout,
                                 &(client->abort)) < 0) {
                client_drop(client, PSL_IDLE_CYCLES,
                            CLIENT_NONE);
                debug_msg("_client_loop: client associate failed; could not communicate with socket");
                break;
            }
            _client_associate(client, data[0], (char)data[1]);
            debug_msg("_client_loop: client associated");
            break;
        }
        client->pending = 0;
        break;
        lock_delay(&lock);
    }
    pthread_mutex_unlock(&lock);

    // Terminate thread
    pthread_exit(NULL);
}
Example #10
// PSL thread loop
static void *_psl_loop(void *ptr)
{
	struct psl *psl = (struct psl *)ptr;
	struct cmd_event *event, *temp;
	int events, i, stopped, reset;
	uint8_t ack = PSLSE_DETACH;

	stopped = 1;
	pthread_mutex_lock(psl->lock);
	while (psl->state != PSLSE_DONE) {
		// idle_cycles keeps the clock running for some time after
		// the AFU has gone idle.  Eventually clocks are no longer
		// presented to an idle AFU, to keep simulation waveforms
		// from growing huge with inactive cycles.
		if (psl->state != PSLSE_IDLE) {
		  // If we have clients or we are in the reset state, refresh idle_cycles
		  // so that the AFU clock is not allowed to stop (idle clocks are
		  // otherwise stopped to save AFU event simulator cycles)
		  if ((psl->attached_clients > 0) || (psl->state == PSLSE_RESET)) {
			psl->idle_cycles = PSL_IDLE_CYCLES;
			if (stopped)
				info_msg("Clocking %s", psl->name);
			fflush(stdout);
			stopped = 0;
		  }
		}
		if (psl->idle_cycles) {
			// Clock AFU
//printf("before psl_signal_afu_model in psl_loop \n");
			psl_signal_afu_model(psl->afu_event);
			// Check for events from AFU
			events = psl_get_afu_events(psl->afu_event);
//printf("after psl_get_afu_events, events is 0x%3x \n", events);
			// Error on socket
			if (events < 0) {
				warn_msg("Lost connection with AFU");
				break;
			}
			// Handle events from AFU
			if (events > 0)
				_handle_afu(psl);

			// Drive events to AFU
			send_job(psl->job);
			send_pe(psl->job);
			send_mmio(psl->mmio);

			if (psl->mmio->list == NULL)
				psl->idle_cycles--;
		} else {
			if (!stopped)
				info_msg("Stopping clocks to %s", psl->name);
			stopped = 1;
			lock_delay(psl->lock);
		}

		// Skip client section if AFU descriptor hasn't been read yet
		if (psl->client == NULL) {
			lock_delay(psl->lock);
			continue;
		}
		// Check for event from application
		reset = 0;
		for (i = 0; i < psl->max_clients; i++) {
			if (psl->client[i] == NULL)
				continue;
			if ((psl->client[i]->type == 'd') && 
			    (psl->client[i]->state == CLIENT_NONE) &&
			    (psl->client[i]->idle_cycles == 0)) {
			        // This is the old way of detaching a dedicated-process app/AFU
			        // pair: receive the detach message, drop the client, and wait
			        // for idle_cycles to reach 0
				put_bytes(psl->client[i]->fd, 1, &ack,
					  psl->dbg_fp, psl->dbg_id,
					  psl->client[i]->context);
				_free(psl, psl->client[i]);
				psl->client[i] = NULL;  // aha - this is how we only called _free once the old way
				                        // why do we not free client[i]?
				                        // because this was a short cut pointer
				                        // the *real* client point is in client_list in pslse
				reset = 1;
				// For m/s devices we need to handle this differently and not
				// send a reset:
				//   _handle_client creates the llcmds to terminate and remove
				//   send_pe sends the llcmd PEs to the AFU one at a time
				//   _handle_afu calls _handle_aux2, which completes the llcmd
				//   PEs when jcack is asserted by the AFU; when the remove
				//   llcmd is processed, we should put_bytes, _free and set
				//   client[i] to NULL
				continue;
			}
			if (psl->state == PSLSE_RESET)
				continue;
			_handle_client(psl, psl->client[i]);
			if (psl->client[i]->idle_cycles) {
				psl->client[i]->idle_cycles--;
			}
			if (client_cmd(psl->cmd, psl->client[i])) {
				psl->client[i]->idle_cycles = PSL_IDLE_CYCLES;
			}
		}

		// Send reset to AFU
		if (reset == 1) {
			psl->cmd->buffer_read = NULL;
			event = psl->cmd->list;
			while (event != NULL) {
				if (reset) {
					warn_msg
					    ("Client dropped context before AFU completed");
					reset = 0;
				}
				info_msg("Dumping command tag=0x%02x",
					 event->tag);
#ifdef PSL9
				info_msg("Dumping itag=0x%02x utag=0x%02x type=0x%02x state=0x%02x",
					event->itag, event->utag, event->type, event->state);
#endif
				if (event->data) {
					free(event->data);
				}
				if (event->parity) {
					free(event->parity);
				}
				temp = event;
				event = event->_next;
				free(temp);
			}
			psl->cmd->list = NULL;
			info_msg("Sending reset to AFU");
			add_job(psl->job, PSL_JOB_RESET, 0L);
		}

		lock_delay(psl->lock);
	}

	// Disconnect clients
	for (i = 0; i < psl->max_clients; i++) {
		if ((psl->client != NULL) && (psl->client[i] != NULL)) {
			// FIXME: Send warning to clients first?
			info_msg("Disconnecting %s context %d", psl->name,
				 psl->client[i]->context);
			close_socket(&(psl->client[i]->fd));
		}
	}

	// DEBUG
	debug_afu_drop(psl->dbg_fp, psl->dbg_id);

	// Disconnect from simulator, free memory and shut down thread
	info_msg("Disconnecting %s @ %s:%d", psl->name, psl->host, psl->port);
	if (psl->client)
		free(psl->client);
	if (psl->_prev)
		psl->_prev->_next = psl->_next;
	if (psl->_next)
		psl->_next->_prev = psl->_prev;
	if (psl->cmd) {
		free(psl->cmd);
	}
	if (psl->job) {
		free(psl->job);
	}
	if (psl->mmio) {
		free(psl->mmio);
	}
	if (psl->host)
		free(psl->host);
	if (psl->afu_event) {
		psl_close_afu_event(psl->afu_event);
		free(psl->afu_event);
	}
	if (psl->name)
		free(psl->name);
	if (*(psl->head) == psl)
		*(psl->head) = psl->_next;
	pthread_mutex_unlock(psl->lock);
	free(psl);
	pthread_exit(NULL);
}