/* Return true if any threshold transition occurs. */
static int dptf_check_temp_threshold(int sensor_id, int temp)
{
	int tripped = 0;
	int max, i;

	for (i = 0; i < DPTF_THRESHOLDS_PER_SENSOR; i++) {

		max = dptf_threshold[sensor_id][i].temp;
		if (max < 0)			/* disabled? */
			continue;

		if (temp >= max)
			cond_set_true(&dptf_threshold[sensor_id][i].over);
		else if (temp <= max - DPTF_THRESHOLD_HYSTERESIS)
			cond_set_false(&dptf_threshold[sensor_id][i].over);

		if (cond_went_true(&dptf_threshold[sensor_id][i].over)) {
			CPRINTS("DPTF over threshold [%d][%d",
				sensor_id, i);
			atomic_or(&dptf_seen, (1 << sensor_id));
			tripped = 1;
		}
		if (cond_went_false(&dptf_threshold[sensor_id][i].over)) {
			CPRINTS("DPTF under threshold [%d][%d",
				sensor_id, i);
			atomic_or(&dptf_seen, (1 << sensor_id));
			tripped = 1;
		}
	}

	return tripped;
}
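The two comparisons above form a hysteresis band: the `over` condition trips at `max` but only releases once the temperature drops to `max - DPTF_THRESHOLD_HYSTERESIS`, so a reading jittering around the threshold cannot toggle the state on every poll. A minimal standalone sketch of the same latch (names hypothetical, not from the EC codebase):

/* Hypothetical standalone hysteresis latch, sketching the logic above. */
struct hyst_latch {
	int trip;     /* trips when value >= trip */
	int gap;      /* releases when value <= trip - gap */
	int tripped;  /* latched state */
};

static int hyst_update(struct hyst_latch *h, int value)
{
	if (value >= h->trip)
		h->tripped = 1;
	else if (value <= h->trip - h->gap)
		h->tripped = 0;
	/* inside the band, keep the previous state */
	return h->tripped;
}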
Example #2
static void edu_mmio_write(void *opaque, hwaddr addr, uint64_t val,
                unsigned size)
{
    EduState *edu = opaque;

    if (addr < 0x80 && size != 4) {
        return;
    }

    if (addr >= 0x80 && size != 4 && size != 8) {
        return;
    }

    switch (addr) {
    case 0x04:
        edu->addr4 = ~val;
        break;
    case 0x08:
        if (atomic_read(&edu->status) & EDU_STATUS_COMPUTING) {
            break;
        }
        /* EDU_STATUS_COMPUTING cannot go 0->1 concurrently, because it is only
         * set in this function and it is under the iothread mutex.
         */
        qemu_mutex_lock(&edu->thr_mutex);
        edu->fact = val;
        atomic_or(&edu->status, EDU_STATUS_COMPUTING);
        qemu_cond_signal(&edu->thr_cond);
        qemu_mutex_unlock(&edu->thr_mutex);
        break;
    case 0x20:
        if (val & EDU_STATUS_IRQFACT) {
            atomic_or(&edu->status, EDU_STATUS_IRQFACT);
        } else {
            atomic_and(&edu->status, ~EDU_STATUS_IRQFACT);
        }
        break;
    case 0x60:
        edu_raise_irq(edu, val);
        break;
    case 0x64:
        edu_lower_irq(edu, val);
        break;
    case 0x80:
        dma_rw(edu, true, &val, &edu->dma.src, false);
        break;
    case 0x88:
        dma_rw(edu, true, &val, &edu->dma.dst, false);
        break;
    case 0x90:
        dma_rw(edu, true, &val, &edu->dma.cnt, false);
        break;
    case 0x98:
        if (!(val & EDU_DMA_RUN)) {
            break;
        }
        dma_rw(edu, true, &val, &edu->dma.cmd, true);
        break;
    }
}
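For context, the 0x08 case hands `fact` to a worker thread through `thr_mutex`/`thr_cond`. A hedged sketch of the consuming side, modeled on QEMU's edu device of the same era but simplified and not verified line-for-line here:

/* Hedged sketch of the worker loop that pairs with the 0x08 write above. */
static void *edu_fact_thread(void *opaque)
{
    EduState *edu = opaque;

    while (1) {
        uint64_t val;

        qemu_mutex_lock(&edu->thr_mutex);
        while ((atomic_read(&edu->status) & EDU_STATUS_COMPUTING) == 0) {
            qemu_cond_wait(&edu->thr_cond, &edu->thr_mutex);
        }
        val = edu->fact;
        qemu_mutex_unlock(&edu->thr_mutex);

        /* ... compute the factorial of val and store the result ... */

        atomic_and(&edu->status, ~EDU_STATUS_COMPUTING);
    }
    return NULL;
}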
Example #3
static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
						  struct zfcp_adapter *adapter,
						  struct zfcp_port *port,
						  struct scsi_device *sdev)
{
	struct zfcp_erp_action *erp_action;
	struct zfcp_scsi_dev *zfcp_sdev;

	switch (need) {
	case ZFCP_ERP_ACTION_REOPEN_LUN:
		zfcp_sdev = sdev_to_zfcp(sdev);
		if (!(act_status & ZFCP_STATUS_ERP_NO_REF))
			if (scsi_device_get(sdev))
				return NULL;
		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE,
				&zfcp_sdev->status);
		erp_action = &zfcp_sdev->erp_action;
		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
		erp_action->port = port;
		erp_action->sdev = sdev;
		if (!(atomic_read(&zfcp_sdev->status) &
		      ZFCP_STATUS_COMMON_RUNNING))
			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
		break;

	case ZFCP_ERP_ACTION_REOPEN_PORT:
	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
		if (!get_device(&port->dev))
			return NULL;
		zfcp_erp_action_dismiss_port(port);
		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
		erp_action = &port->erp_action;
		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
		erp_action->port = port;
		if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING))
			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
		break;

	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
		kref_get(&adapter->ref);
		zfcp_erp_action_dismiss_adapter(adapter);
		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
		erp_action = &adapter->erp_action;
		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
		if (!(atomic_read(&adapter->status) &
		      ZFCP_STATUS_COMMON_RUNNING))
			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
		break;

	default:
		return NULL;
	}

	erp_action->adapter = adapter;
	erp_action->action = need;
	erp_action->status = act_status;

	return erp_action;
}
Example #4
int
mp3_init(
	int stereo,
	float framerate,
	float bitrate,
	void ** cookie)
{
	//fprintf(stderr, "mp3_init(%s, %g, %g)\n", stereo ? "stereo" : "mono", framerate, bitrate);


	int32 v = atomic_or(&inited, 1);
	if (v == 0) {
		g_lock = create_sem(1, "EncoderGlobals");
		atomic_or(&inited, 2);
	}
	else while (!(v & 2)) {
		snooze(10000);
		v = atomic_or(&inited, 0);
	}
	thread_info tinfo;
	get_thread_info(find_thread(NULL), &tinfo);
	//fprintf(stderr,
	//		"thread %d (%s) tries to lock blade_mp3_globals\n",
	//		tinfo.thread,
	//		tinfo.name);
	status_t ret = acquire_sem(g_lock);
	if (ret == B_OK) 
	{
		//fprintf(stderr,
		//		"blade_mp3_globals holder is thread %d (%s)\n",
		//		tinfo.thread,
		//		tinfo.name);

		CodecInitIn info;
		info.frequency = (int32)framerate;
		info.mode = (stereo ? 0 : 3);
		info.bitrate = (int32)(bitrate / 1000);	/* bit/s -> kbit/s */
		info.emphasis = 0;
		info.fPrivate = 0;
		info.fCRC = 0;
		info.fCopyright = 1;
		info.fOriginal = 1;

		CodecInitOut * p = codecInit(&info);
		if (p == 0) 
		{
			//fprintf(stderr, "codecInit returns error\n");
			ret = B_ERROR;
		}
		else 
		{
			//fprintf(stderr, "p->nSamples = %d\n", p->nSamples);
			assert(p->nSamples == 1152*(stereo ? 2 : 1));
			*cookie = (void *)1;
		}
	}
	return ret;
}
Example #5
BResourceSet& Resources()
{
	if (atomic_or(&gInitResources, 1) == 0) {
		gResources.AddResources((void*)Resources);
		atomic_or(&gInitResources, 2);
	} else {
		while ((gInitResources&2) == 0) snooze(20000);
	}
	return gResources;
}
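Examples #4 and #5 (and #29 below) share one idiom: bit 0 of the flag claims the one-time initialization, bit 1 publishes its completion, and losing threads spin until bit 1 appears. A portable C11 sketch of the handshake (all names hypothetical; in new code, call_once from <threads.h> is the simpler choice):

#include <stdatomic.h>
#include <sched.h>

static atomic_int g_init_state;  /* bit 0: init claimed, bit 1: init done */
static int g_resource;           /* stand-in for the data being guarded */

static void init_once(void)
{
	if ((atomic_fetch_or(&g_init_state, 1) & 1) == 0) {
		g_resource = 42;                   /* winner runs the init */
		atomic_fetch_or(&g_init_state, 2); /* publish completion */
	} else {
		while ((atomic_load(&g_init_state) & 2) == 0)
			sched_yield();             /* losers wait for bit 1 */
	}
}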
Example #6
void
acquire_spinlock(spinlock* lock)
{
#if DEBUG_SPINLOCKS
	if (are_interrupts_enabled()) {
		panic("acquire_spinlock: attempt to acquire lock %p with interrupts "
			"enabled", lock);
	}
#endif

	if (sNumCPUs > 1) {
		int currentCPU = smp_get_current_cpu();
#if B_DEBUG_SPINLOCK_CONTENTION
		while (atomic_add(&lock->lock, 1) != 0)
			process_all_pending_ici(currentCPU);
#else
		while (1) {
			uint32 count = 0;
			while (*lock != 0) {
				if (++count == SPINLOCK_DEADLOCK_COUNT) {
					panic("acquire_spinlock(): Failed to acquire spinlock %p "
						"for a long time!", lock);
					count = 0;
				}

				process_all_pending_ici(currentCPU);
				PAUSE();
			}
			if (atomic_or((int32*)lock, 1) == 0)
				break;
		}

#	if DEBUG_SPINLOCKS
		push_lock_caller(arch_debug_get_caller(), lock);
#	endif
#endif
	} else {
#if DEBUG_SPINLOCKS
		int32 oldValue;
		oldValue = atomic_or((int32*)lock, 1);
		if (oldValue != 0) {
			panic("acquire_spinlock: attempt to acquire lock %p twice on "
				"non-SMP system (last caller: %p, value %" B_PRId32 ")", lock,
				find_lock_caller(lock), oldValue);
		}

		push_lock_caller(arch_debug_get_caller(), lock);
#endif
	}
#if DEBUG_SPINLOCK_LATENCIES
	push_latency(lock);
#endif
}
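The SMP branch above is a test-and-test-and-set lock: it spins on a plain read, which keeps the cache line shared, and only attempts the atomic_or once the lock looks free. The same shape in portable C11, as a hedged sketch:

#include <stdatomic.h>

static atomic_int lock_word;  /* hypothetical standalone lock */

static void tts_lock(void)
{
	for (;;) {
		while (atomic_load_explicit(&lock_word, memory_order_relaxed) != 0)
			;  /* read-only spin; a PAUSE hint would go here */
		if (atomic_fetch_or_explicit(&lock_word, 1,
				memory_order_acquire) == 0)
			return;  /* we flipped 0 -> 1; lock acquired */
	}
}

static void tts_unlock(void)
{
	atomic_store_explicit(&lock_word, 0, memory_order_release);
}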
Example #7
static status_t
keyboard_open(const char *name, uint32 flags, void **_cookie)
{
	status_t status;

	TRACE("ps2: keyboard_open %s\n", name);

	if (atomic_or(&sKeyboardOpenMask, 1) != 0)
		return B_BUSY;

	status = probe_keyboard();
	if (status != B_OK) {
		INFO("ps2: keyboard probing failed\n");
		ps2_service_notify_device_removed(&ps2_device[PS2_DEVICE_KEYB]);
		goto err1;
	}

	INFO("ps2: keyboard found\n");

	sKeyboardSem = create_sem(0, "keyboard_sem");
	if (sKeyboardSem < 0) {
		status = sKeyboardSem;
		goto err1;
	}

	sKeyBuffer = create_packet_buffer(KEY_BUFFER_SIZE * sizeof(at_kbd_io));
	if (sKeyBuffer == NULL) {
		status = B_NO_MEMORY;
		goto err2;
	}

	*_cookie = NULL;
	ps2_device[PS2_DEVICE_KEYB].disconnect = &ps2_keyboard_disconnect;
	ps2_device[PS2_DEVICE_KEYB].handle_int = &keyboard_handle_int;

	atomic_or(&ps2_device[PS2_DEVICE_KEYB].flags, PS2_FLAG_ENABLED);

	TRACE("ps2: keyboard_open %s success\n", name);
	return B_OK;

err2:
	delete_sem(sKeyboardSem);
err1:
	atomic_and(&sKeyboardOpenMask, 0);

	TRACE("ps2: keyboard_open %s failed\n", name);
	return status;
}
Example #8
int
sigprocmask(int how, const sigset_t *set, sigset_t *oldSet)
{
	struct thread *thread = thread_get_current_thread();
	sigset_t oldMask = atomic_get(&thread->sig_block_mask);

	if (set != NULL) {
		T(SigProcMask(how, *set));

		switch (how) {
			case SIG_BLOCK:
				atomic_or(&thread->sig_block_mask, *set & BLOCKABLE_SIGNALS);
				break;
			case SIG_UNBLOCK:
				atomic_and(&thread->sig_block_mask, ~*set);
				break;
			case SIG_SETMASK:
				atomic_set(&thread->sig_block_mask, *set & BLOCKABLE_SIGNALS);
				break;
			default:
				return B_BAD_VALUE;
		}

		update_current_thread_signals_flag();
	}

	if (oldSet != NULL)
		*oldSet = oldMask;

	return B_OK;
}
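The semantics mirror POSIX sigprocmask(). A hedged userland usage sketch, deferring one signal across a critical region:

#include <signal.h>
#include <stddef.h>

static void with_sigusr1_blocked(void (*fn)(void))
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &old);
	fn();                              /* SIGUSR1 delivery deferred here */
	sigprocmask(SIG_SETMASK, &old, NULL);
}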
Example #9
status_t
mutex_lock(mutex *lock)
{
	uint32 count = 0;
	const uint32 kMaxCount
		= (lock->flags & MUTEX_FLAG_ADAPTIVE) != 0 ? MAX_UNSUCCESSFUL_SPINS : 1;

	int32 oldValue;
	do {
		// set the locked flag
		oldValue = atomic_or(&lock->lock, B_USER_MUTEX_LOCKED);

		if ((oldValue & (B_USER_MUTEX_LOCKED | B_USER_MUTEX_WAITING)) == 0
				|| (oldValue & B_USER_MUTEX_DISABLED) != 0) {
			// No one has the lock or is waiting for it, or the mutex has been
			// disabled.
			return B_OK;
		}
	} while (count++ < kMaxCount && (oldValue & B_USER_MUTEX_WAITING) != 0);

	// we have to call the kernel
	status_t error;
	do {
		error = _kern_mutex_lock(&lock->lock, lock->name, 0, 0);
	} while (error == B_INTERRUPTED);

	return error;
}
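For symmetry, an unlock fast path would clear B_USER_MUTEX_LOCKED with atomic_and and only enter the kernel when a waiter was flagged. A hedged sketch assuming the flag semantics shown above; this is not the verified Haiku implementation:

/* Hedged sketch, not Haiku's actual unlock: clear LOCKED; if WAITING was
 * set, a blocked thread needs the kernel to be woken. */
status_t
mutex_unlock_sketch(mutex *lock)
{
	int32 oldValue = atomic_and(&lock->lock, ~(int32)B_USER_MUTEX_LOCKED);
	if ((oldValue & B_USER_MUTEX_WAITING) != 0)
		return _kern_mutex_unlock(&lock->lock, 0);
	return B_OK;
}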
Example #10
int32 
BBlockFIFO::CopyNextBufferIn(const void *source, size_t requestSize, bigtime_t timeout, bool atEndOfData)
{
	if (source == 0) { FPRINTF(stderr, "BAD_VALUE: source == NULL\n"); return B_BAD_VALUE; }
	if (requestSize == 0) {
		if (atEndOfData) {
ENTER_PUT
			atomic_or(&_mFlags, flagEndOfData);
			delete_sem(_mGetSem);
			_mGetSem = -1;
LEAVE_PUT
		}
		return 0;
	}
	const char * s = (const char *)source;
	int32 total = 0;
	while (requestSize > 0) {
		void * ptr;
		int got = BeginPut(&ptr, requestSize, timeout);
		if (got < 0) return (total > 0 ? total : got);
		requestSize -= got;
		memcpy(ptr, s, got);
		s += got;
		total += got;
		(void)EndPut((requestSize == 0) ? atEndOfData : false);
	}
	return total;
}
Example #11
static int32
eng_interrupt(void *data)
{
	int32 handled = B_UNHANDLED_INTERRUPT;
	device_info *di = (device_info *)data;
	shared_info *si = di->si;
	int32 *flags = &(si->flags);
	vuint32 *regs;

	/* is someone already handling an interrupt for this device? */
	if (atomic_or(flags, SKD_HANDLER_INSTALLED) & SKD_HANDLER_INSTALLED) {
		goto exit0;
	}
	/* get regs */
	regs = di->regs;

	/* was it a VBI? */
	if (caused_vbi(regs)) {
		/*clear the interrupt*/
		clear_vbi(regs);
		/*release the semaphore*/
		handled = thread_interrupt_work(flags, regs, si);
	}

	/* note that we're not in the handler any more */
	atomic_and(flags, ~SKD_HANDLER_INSTALLED);

exit0:
	return handled;
}
Example #12
int32 
BBlockFIFO::BeginPut(void **outData, size_t requestSize, bigtime_t timeout)
{
	if (!outData) { FPRINTF(stderr, "BAD_VALUE: outData == NULL\n"); return B_BAD_VALUE; }
	if (_mFlags & flagEndOfData) { FPRINTF(stderr, "EPERM: end of data\n"); return EPERM; }
ENTER_PUT
	if (requestSize > _mBufferSize) {
		requestSize = _mBufferSize;
	}
	ssize_t o = _mPutOff + requestSize;
	if (o > (ssize_t)_mAreaSize) {
		o = _mAreaSize;
	}
	int32 req = o-_mPutOff;
	status_t err = acquire_sem_etc(_mPutSem, req, B_TIMEOUT, timeout);
	if (err < B_OK) {
LEAVE_PUT
		FPRINTF(stderr, "BeginPut: acquire_sem_etc() returns %ld (req is %ld)\n", err, req);
		return err;
	}
	*outData = _mBuffer + _mPutOff;
	if (o == (ssize_t)_mAreaSize)
		_mPendingPut = 0;
	else
		_mPendingPut = o;
	atomic_or(&_mFlags, flagPendingPut);
	return req;
}
Example #13
/* Attempts to get a lock on the page for IO operations.  If it is already
 * locked, it will block the kthread until it is unlocked.  Note that this is
 * really a "sleep on some event", not necessarily the IO, but it is "the page
 * is ready". */
void lock_page(struct page *page)
{
	/* when this returns, we are the ones to have locked the page */
	sem_down(&page->pg_sem);
	assert(!(atomic_read(&page->pg_flags) & PG_LOCKED));
	atomic_or(&page->pg_flags, PG_LOCKED);
}
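The matching unlock clears the flag before waking the next locker, which keeps the assert in lock_page() valid. A hedged sketch, not verified against the project's actual unlock_page():

void unlock_page(struct page *page)
{
	/* clear PG_LOCKED before handing the page to the next locker */
	atomic_and(&page->pg_flags, ~PG_LOCKED);
	sem_up(&page->pg_sem);
}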
Example #14
/**
	Set the direction of a GPIO.
	
	\param	gpio_id
			identifier of the GPIO, must be created by GPIO_MAKE_ID()
	\param	dir
			direction of the GPIO, either \ref GPIO_OUTPUT or \ref GPIO_INPUT .
*/
void gpio_set_dir(gpio gpio_id, int dir)
{
	int pin = gpio_id & 0xF;
	unsigned int mask;
	volatile unsigned int * ptr =  (volatile unsigned int *) (gpio_id >> 4);
	
/*
	We cannot do this test, since not every PIC has PORTA and PORTG
	if(((ptr < GPIO_PORTA) || (ptr > GPIO_PORTG)) && ptr)
		ERROR(GPIO_INVALID_GPIO, &gpio_id);
*/	
	if(ptr == GPIO_NONE) 
		return;

	if(dir == GPIO_OUTPUT)
	{
		mask = ~(1 << pin);
		atomic_and(ptr, mask);
	}
	else if (dir == GPIO_INPUT)
	{
		mask = 1 << pin;
		atomic_or(ptr, mask);
	}
	else
		ERROR(GPIO_INVALID_DIR, &dir);
}
Example #15
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	atomic_or(pending, &mdp5_crtc->pending);
	mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}
Example #16
static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
				   struct zfcp_port *port,
				   struct scsi_device *sdev,
				   char *id, u32 act_status)
{
	int retval = 1, need;
	struct zfcp_erp_action *act;

	if (!adapter->erp_thread)
		return -EIO;

	need = zfcp_erp_required_act(want, adapter, port, sdev);
	if (!need)
		goto out;

	act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev);
	if (!act)
		goto out;
	atomic_or(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
	++adapter->erp_total_count;
	list_add_tail(&act->list, &adapter->erp_ready_head);
	wake_up(&adapter->erp_ready_wq);
	retval = 0;
 out:
	zfcp_dbf_rec_trig(id, adapter, port, sdev, want, need);
	return retval;
}
Example #17
void sndboard_rethink(void)
{
	struct toccata_data *data = &toccata;
	atomic_and(&uae_int_requested, ~0x200);
	if (data->toccata_irq)
		atomic_or(&uae_int_requested, 0x200);
}
Example #18
void Task::Signal(EventFlags events)
{
    //Fancy no mutex implementation. We atomically mask the new events into
    //the event mask. Because atomic_or returns the old state of the mask,
    //we only schedule this task once.
    events |= kAlive;
    EventFlags oldEvents = atomic_or(&fEvents, events);
    if ((!(oldEvents & kAlive)) && (TaskThreadPool::sNumTaskThreads > 0))
    {
        if (fUseThisThread != NULL)
        {
            // Task needs to be placed on a particular thread.
            if (TASK_DEBUG) if (fTaskName[0] == 0) ::strcpy(fTaskName, " corrupt task");
            if (TASK_DEBUG) qtss_printf("Task::Signal enque TaskName=%s fUseThisThread=%lu q elem=%lu enclosing=%lu\n", fTaskName, (UInt32) fUseThisThread, (UInt32) &fTaskQueueElem, (UInt32) this);
            fUseThisThread->fTaskQueue.EnQueue(&fTaskQueueElem);
        }
        else
        {
            //find a thread to put this task on
            unsigned int theThread = atomic_add(&sThreadPicker, 1);
            theThread %= TaskThreadPool::sNumTaskThreads;
            if (TASK_DEBUG) if (fTaskName[0] == 0) ::strcpy(fTaskName, " corrupt task");
            if (TASK_DEBUG) qtss_printf("Task::Signal enque TaskName=%s thread=%lu q elem=%lu enclosing=%lu\n", fTaskName, (UInt32)TaskThreadPool::sTaskThreadArray[theThread],(UInt32) &fTaskQueueElem,(UInt32) this);
            TaskThreadPool::sTaskThreadArray[theThread]->fTaskQueue.EnQueue(&fTaskQueueElem);
        }
    }
    else
        if (TASK_DEBUG) qtss_printf("Task::Signal sent to dead TaskName=%s q elem=%lu enclosing=%lu\n", fTaskName, (UInt32) &fTaskQueueElem, (UInt32) this);
}
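The pattern the comment describes generalizes: because atomic_or returns the prior mask, exactly one caller observes the kAlive bit clear and wins the idle-to-scheduled transition. A hedged C11 sketch with invented names:

#include <stdatomic.h>
#include <stdbool.h>

#define EV_ALIVE 0x1u  /* hypothetical "already scheduled" bit */

static atomic_uint task_events;

/* Returns true iff the caller won the idle -> scheduled transition and
 * should enqueue the task; later signals just accumulate event bits. */
static bool signal_events(unsigned events)
{
	unsigned old = atomic_fetch_or(&task_events, events | EV_ALIVE);
	return (old & EV_ALIVE) == 0;
}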
Example #19
/**
	Set the value of a GPIO.
	
	\param	gpio_id
			identifier of the GPIO, must be created by GPIO_MAKE_ID().
	\param	value
			\ref true for VCC, \ref false for GND.
*/
void gpio_write(gpio gpio_id, bool value)
{
	int pin = gpio_id & 0xF;
	unsigned int mask;
	volatile unsigned int * ptr = (volatile unsigned int *) (gpio_id >> 4);

/*
	We cannot do this test, since not every PIC has PORTA and PORTG
	if(((ptr < GPIO_PORTA) || (ptr > GPIO_PORTG)) && ptr)
		ERROR(GPIO_INVALID_GPIO, &gpio_id);
*/
	if(ptr == GPIO_NONE) 
		return;

	ptr += 2;
	if(value == false)
	{
		mask = ~(1 << pin);
		atomic_and(ptr, mask);
	}
	else if(value == true)
	{
		mask = 1 << pin;
		atomic_or(ptr, mask);
	}
	else
		ERROR(GPIO_INVALID_VALUE, &value);
}
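A hedged usage sketch for gpio_set_dir() (Example #14) and gpio_write() above, assuming GPIO_MAKE_ID(port, pin) composes an id the way these functions decompose it (port register in the high bits, pin number in the low four):

void blink_setup(void)
{
	/* Hypothetical pin: bit 3 of port B. */
	gpio led = GPIO_MAKE_ID(GPIO_PORTB, 3);

	gpio_set_dir(led, GPIO_OUTPUT);
	gpio_write(led, true);   /* drive to VCC */
	gpio_write(led, false);  /* drive to GND */
	gpio_set_dir(led, GPIO_INPUT);
}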
Example #20
static void
user_mutex_unlock_locked(int32* mutex, addr_t physicalAddress, uint32 flags)
{
	if (UserMutexEntry* entry = sUserMutexTable.Lookup(physicalAddress)) {
		// Someone is waiting -- set the locked flag. It might still be set,
		// but when using userland atomic operations, the caller will usually
		// have cleared it already.
		int32 oldValue = atomic_or(mutex, B_USER_MUTEX_LOCKED);

		// unblock the first thread
		entry->locked = true;
		entry->condition.NotifyOne();

		if ((flags & B_USER_MUTEX_UNBLOCK_ALL) != 0
				|| (oldValue & B_USER_MUTEX_DISABLED) != 0) {
			// unblock all the other waiting threads as well
			for (UserMutexEntryList::Iterator it
					= entry->otherEntries.GetIterator();
				UserMutexEntry* otherEntry = it.Next();) {
				otherEntry->locked = true;
				otherEntry->condition.NotifyOne();
			}
		}
	} else {
		// no one is waiting -- clear locked flag
		atomic_and(mutex, ~(int32)B_USER_MUTEX_LOCKED);
	}
}
Example #21
static int32
InterruptHandler(void* data)
{
	int32 handled = B_UNHANDLED_INTERRUPT;
	DeviceInfo& di = *((DeviceInfo*)data);
	int32* flags = &(di.flags);

	// Is someone already handling an interrupt for this device?
	if (atomic_or(flags, SKD_HANDLER_INSTALLED) & SKD_HANDLER_INSTALLED)
		return B_UNHANDLED_INTERRUPT;

	if (InterruptIsVBI()) {	// was interrupt a VBI?
		ClearVBI();			// clear interrupt

		handled = B_HANDLED_INTERRUPT;

		// Release vertical blanking semaphore.
		sem_id& sem = di.sharedInfo->vertBlankSem;

		if (sem >= 0) {
			int32 blocked;
			if ((get_sem_count(sem, &blocked) == B_OK) && (blocked < 0)) {
				release_sem_etc(sem, -blocked, B_DO_NOT_RESCHEDULE);
				handled = B_INVOKE_SCHEDULER;
			}
		}
	}

	atomic_and(flags, ~SKD_HANDLER_INSTALLED);	// note we're not in handler anymore

	return handled;
}
Example #22
static void zfcp_erp_lun_unblock(struct scsi_device *sdev)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);

	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status))
		zfcp_dbf_rec_run("erlubl1", &sdev_to_zfcp(sdev)->erp_action);
	atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
}
Example #23
/*!	Updates the thread::flags field according to what signals are pending.
	Interrupts must be disabled and the thread lock must be held.
*/
static void
update_thread_signals_flag(struct thread* thread)
{
	if (atomic_get(&thread->sig_pending) & ~atomic_get(&thread->sig_block_mask))
		atomic_or(&thread->flags, THREAD_FLAGS_SIGNALS_PENDING);
	else
		atomic_and(&thread->flags, ~THREAD_FLAGS_SIGNALS_PENDING);
}
Example #24
/*!	Spin until all CPUs have reached the rendez-vous point.

	The rendez-vous variable \c *var must have been initialized to 0 before the
	function is called. The variable will be non-zero when the function returns.

	Note that when the function returns on one CPU, it only means that all CPUs
	have already entered the function. It does not mean that the variable can
	already be reset. Only when all CPUs have returned (which would have to be
	ensured via another rendez-vous) can the variable be reset.
*/
void
smp_cpu_rendezvous(volatile uint32* var, int current_cpu)
{
	atomic_or((vint32*)var, 1 << current_cpu);

	while (*var != (((uint32)1 << sNumCPUs) - 1))
		PAUSE();
}
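Resetting the variable safely therefore takes a second rendez-vous, as the comment says. A hedged usage sketch with hypothetical globals:

static volatile uint32 sRendezvous1 = 0;
static volatile uint32 sRendezvous2 = 0;

static void
per_cpu_operation(int currentCPU)
{
	smp_cpu_rendezvous(&sRendezvous1, currentCPU);
	/* every CPU has arrived; do the synchronized work here */
	smp_cpu_rendezvous(&sRendezvous2, currentCPU);
	/* only now may sRendezvous1 be reset for reuse */
	if (currentCPU == 0)
		sRendezvous1 = 0;
}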
Example #25
/* Console commands */
static int command_ec_int(int argc, char **argv)
{
	/* Indicate that ec_int gpio is active due to host command */
	atomic_or(&ec_int_status, PD_STATUS_HOST_EVENT);
	pd_send_ec_int();

	return EC_SUCCESS;
}
Example #26
void gpio_set_opendrain(gpio gpio_id, int opendrain) {
	/* Since this is a configuration function, it's not time-critical so I can do it the "dumb" way
	 */
	int port, pin, mask;
	volatile unsigned int * ptr;
	pin = gpio_id & 0xF;
	port = gpio_id >> 4;
	
	if(!(gpio_id & 0xFFF0))	/* GPIO_NONE */
		return;
	
#if ODC_EXIST(A)
	if(port == (unsigned int) GPIO_PORTA) {
			ptr = (volatile unsigned int *) &ODCA;
	} else
#endif
#if ODC_EXIST(B)
	if(port == (unsigned int) GPIO_PORTB) {
			ptr = (volatile unsigned int *) &ODCB;
	} else 
#endif
#if ODC_EXIST(C)
	if(port == (unsigned int) GPIO_PORTC) {
			ptr = (volatile unsigned int *) &ODCC;
	} else 
#endif
#if ODC_EXIST(D)
	if(port == (unsigned int) GPIO_PORTD) {
			ptr = (volatile unsigned int *) &ODCD;
	} else 
#endif
#if ODC_EXIST(E)
	if(port == (unsigned int) GPIO_PORTE) {
			ptr = (volatile unsigned int *) &ODCE;	
	} else 
#endif
#if ODC_EXIST(F)
	if(port == (unsigned int) GPIO_PORTF) {
			ptr = (volatile unsigned int *) &ODCF;
	} else 
#endif
#if ODC_EXIST(G)
	if(port == (unsigned int) GPIO_PORTG) {
			ptr = (volatile unsigned int *) &ODCG;
	} else
#endif
	{
			ERROR(GPIO_INVALID_GPIO, &gpio_id);	
	}

	if(opendrain) {
		mask = 1 << pin;
		atomic_or(ptr, mask);
	} else {
		mask = ~(1 << pin);
		atomic_and(ptr, mask);
	}
}
Example #27
/**
 * zfcp_port_enqueue - enqueue port to port list of adapter
 * @adapter: adapter where remote port is added
 * @wwpn: WWPN of the remote port to be enqueued
 * @status: initial status for the port
 * @d_id: destination id of the remote port to be enqueued
 * Returns: pointer to enqueued port on success, ERR_PTR on error
 *
 * All port internal structures are set up and the sysfs entry is generated.
 * d_id is used to enqueue ports with a well known address like the Directory
 * Service for nameserver lookup.
 */
struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
				     u32 status, u32 d_id)
{
	struct zfcp_port *port;
	int retval = -ENOMEM;

	kref_get(&adapter->ref);

	port = zfcp_get_port_by_wwpn(adapter, wwpn);
	if (port) {
		put_device(&port->dev);
		retval = -EEXIST;
		goto err_out;
	}

	port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
	if (!port)
		goto err_out;

	rwlock_init(&port->unit_list_lock);
	INIT_LIST_HEAD(&port->unit_list);
	atomic_set(&port->units, 0);

	INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
	INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
	INIT_WORK(&port->rport_work, zfcp_scsi_rport_work);

	port->adapter = adapter;
	port->d_id = d_id;
	port->wwpn = wwpn;
	port->rport_task = RPORT_NONE;
	port->dev.parent = &adapter->ccw_device->dev;
	port->dev.groups = zfcp_port_attr_groups;
	port->dev.release = zfcp_port_release;

	if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
		kfree(port);
		goto err_out;
	}
	retval = -EINVAL;

	if (device_register(&port->dev)) {
		put_device(&port->dev);
		goto err_out;
	}

	write_lock_irq(&adapter->port_list_lock);
	list_add_tail(&port->list, &adapter->port_list);
	write_unlock_irq(&adapter->port_list_lock);

	atomic_or(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);

	return port;

err_out:
	zfcp_ccw_adapter_put(adapter);
	return ERR_PTR(retval);
}
Example #28
void
BTimeSource::DirectStart(bigtime_t at)
{
	CALLED();
	if (fBuf)
		atomic_or(&fBuf->isrunning, 1);
	else
		fStarted = true;
}
Example #29
AmSongFunctionRoster* AmSongFunctionRoster::Default()
{
	if (atomic_or(&gRosterCreated, 1) == 0) {
		gRoster = new AmSongFunctionRoster();
	} else {
		while (!gRoster) snooze(20000);
	}
	return gRoster;
}
Example #30
/* Send host event up to AP */
void pd_send_host_event(int mask)
{
	/* mask must be set */
	if (!mask)
		return;

	atomic_or(&(host_event_status.status), mask);
	/* interrupt the AP */
	host_set_single_event(EC_HOST_EVENT_PD_MCU);
}