Example #1
void
M68KVMTranslationMap::Flush()
{
	if (fInvalidPagesCount <= 0)
		return;

	Thread* thread = thread_get_current_thread();
	thread_pin_to_current_cpu(thread);

	if (fInvalidPagesCount > PAGE_INVALIDATE_CACHE_SIZE) {
		// invalidate all pages
		TRACE("flush_tmap: %d pages to invalidate, invalidate all\n",
			fInvalidPagesCount);

		if (fIsKernelMap) {
			arch_cpu_global_TLB_invalidate();
			smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVALIDATE_PAGES, 0, 0, 0,
				NULL, SMP_MSG_FLAG_SYNC);
		} else {
			cpu_status state = disable_interrupts();
			arch_cpu_user_TLB_invalidate();
			restore_interrupts(state);

			int cpu = smp_get_current_cpu();
			CPUSet cpuMask = PagingStructures()->active_on_cpus;
			cpuMask.ClearBit(cpu);

			if (!cpuMask.IsEmpty()) {
				smp_send_multicast_ici(cpuMask, SMP_MSG_USER_INVALIDATE_PAGES,
					0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
			}
		}
	} else {
		TRACE("flush_tmap: %d pages to invalidate, invalidate list\n",
			fInvalidPagesCount);

		arch_cpu_invalidate_TLB_list(fInvalidPages, fInvalidPagesCount);

		if (fIsKernelMap) {
			smp_send_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_LIST,
				(addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
				SMP_MSG_FLAG_SYNC);
		} else {
			int cpu = smp_get_current_cpu();
			CPUSet cpuMask = PagingStructures()->active_on_cpus;
			cpuMask.ClearBit(cpu);

			if (!cpuMask.IsEmpty()) {
				smp_send_multicast_ici(cpuMask, SMP_MSG_INVALIDATE_PAGE_LIST,
					(addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
					SMP_MSG_FLAG_SYNC);
			}
		}
	}
	fInvalidPagesCount = 0;

	thread_unpin_from_current_cpu(thread);
}
Example #2
/*!	Finds a free message and gets it.
	NOTE: has the side effect of disabling interrupts;
	the return value is the former interrupt state.
*/
static cpu_status
find_free_message(struct smp_msg** msg)
{
	cpu_status state;

	TRACE(("find_free_message: entry\n"));

retry:
	while (sFreeMessageCount <= 0) {
		state = disable_interrupts();
		process_all_pending_ici(smp_get_current_cpu());
		restore_interrupts(state);
		PAUSE();
	}
	state = disable_interrupts();
	acquire_spinlock(&sFreeMessageSpinlock);

	if (sFreeMessageCount <= 0) {
		// someone grabbed one while we were getting the lock,
		// go back to waiting for it
		release_spinlock(&sFreeMessageSpinlock);
		restore_interrupts(state);
		goto retry;
	}

	*msg = sFreeMessages;
	sFreeMessages = (*msg)->next;
	sFreeMessageCount--;

	release_spinlock(&sFreeMessageSpinlock);

	TRACE(("find_free_message: returning msg %p\n", *msg));

	return state;
}
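
As the note above says, find_free_message() returns with interrupts disabled, so the caller is responsible for restoring the returned state once it is done with the message. A minimal caller sketch follows; the helper name is hypothetical, and it assumes the smp_msg fields and arch_smp_send_ici() used by smp_send_ici() in Example #9.

static void
send_minimal_ici(int32 targetCPU, int32 message)
{
	struct smp_msg* msg;

	// find_free_message() returns with interrupts disabled; the return
	// value is the previous interrupt state.
	cpu_status state = find_free_message(&msg);

	msg->message = message;
	msg->ref_count = 1;
	msg->flags = 0;
	msg->done = false;

	// ... enqueue msg in the target CPU's mailbox as smp_send_ici() does
	// in Example #9, then raise the inter-CPU interrupt ...
	arch_smp_send_ici(targetCPU);

	restore_interrupts(state);
}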
Example #3
static int32
acpi_cstate_idle(int32 state, CpuidleDevice *device)
{
	CpuidleCstate *cState = &device->cStates[state];
	acpi_cstate_info *ci = (acpi_cstate_info *)cState->pData;
	if (!ci->skip_bm_sts) {
		// we fall back to C1 if there's bus master activity
		if (acpi_cstate_bm_check())
			state = 1;
	}
	if (ci->type != ACPI_STATE_C3)
		acpi_cstate_enter(cState);

	// Set BM_RLD so bus master activity can wake the system from C3.
	// On newer chipsets BM_RLD is a no-op, since DMA is handled automatically
	// during the C3 state.
	acpi_cpuidle_driver_info *pi = sAcpiProcessor[smp_get_current_cpu()];
	if (pi->flags & ACPI_FLAG_C_BM)
		sAcpi->write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);

	// disable bus master arbitration during C3
	if (pi->flags & ACPI_FLAG_C_ARB)
		sAcpi->write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);

	acpi_cstate_enter(cState);

	// clear BM_RLD and re-enable the arbiter
	if (pi->flags & ACPI_FLAG_C_BM)
		sAcpi->write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 0);

	if (pi->flags & ACPI_FLAG_C_ARB)
		sAcpi->write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);

	return state;
}
Example #4
void
SystemProfiler::ThreadEnqueuedInRunQueue(Thread* thread)
{
	int cpu = smp_get_current_cpu();

	InterruptsSpinLocker locker(fLock, false, !fReentered[cpu]);
		// When re-entering, we already hold the lock.

	system_profiler_thread_enqueued_in_run_queue* event
		= (system_profiler_thread_enqueued_in_run_queue*)
			_AllocateBuffer(
				sizeof(system_profiler_thread_enqueued_in_run_queue),
				B_SYSTEM_PROFILER_THREAD_ENQUEUED_IN_RUN_QUEUE, cpu, 0);
	if (event == NULL)
		return;

	event->time = system_time_nsecs();
	event->thread = thread->id;
	event->priority = thread->priority;

	fHeader->size = fBufferSize;

	// Unblock the profiler thread if necessary, but not if the enqueued thread
	// had been waiting on a condition variable, since then we'd likely
	// deadlock in ConditionVariable::NotifyOne(), as it acquires a static
	// spinlock.
	if (thread->wait.type != THREAD_BLOCK_TYPE_CONDITION_VARIABLE)
		_MaybeNotifyProfilerThreadLocked();
}
Example #5
void
SystemProfiler::ThreadScheduled(Thread* oldThread, Thread* newThread)
{
	int cpu = smp_get_current_cpu();

	InterruptsSpinLocker locker(fLock, false, !fReentered[cpu]);
		// When re-entering, we already hold the lock.

	// If the old thread starts waiting, handle the wait object.
	if (oldThread->state == B_THREAD_WAITING)
		_WaitObjectUsed((addr_t)oldThread->wait.object, oldThread->wait.type);

	system_profiler_thread_scheduled* event
		= (system_profiler_thread_scheduled*)
			_AllocateBuffer(sizeof(system_profiler_thread_scheduled),
				B_SYSTEM_PROFILER_THREAD_SCHEDULED, cpu, 0);
	if (event == NULL)
		return;

	event->time = system_time_nsecs();
	event->thread = newThread->id;
	event->previous_thread = oldThread->id;
	event->previous_thread_state = oldThread->state;
	event->previous_thread_wait_object_type = oldThread->wait.type;
	event->previous_thread_wait_object = (addr_t)oldThread->wait.object;

	fHeader->size = fBufferSize;

	// unblock the profiler thread, if necessary
	_MaybeNotifyProfilerThreadLocked();
}
Example #6
static int32
x86_smp_error_interrupt(void *data)
{
	// smp error interrupt
	TRACE(("smp error interrupt on cpu %" B_PRId32 "\n", smp_get_current_cpu()));
	return B_HANDLED_INTERRUPT;
}
Example #7
static int32
x86_ici_interrupt(void *data)
{
	// genuine inter-cpu interrupt
	int cpu = smp_get_current_cpu();
	TRACE(("inter-cpu interrupt on cpu %d\n", cpu));
	return smp_intercpu_int_handler(cpu);
}
Example #8
void arch_thread_context_switch(struct thread *t_from, struct thread *t_to, struct vm_translation_map_struct *new_tmap)
{
	addr_t new_pgdir;
#if 0
	int i;

	dprintf("arch_thread_context_switch: cpu %d 0x%x -> 0x%x, aspace 0x%x -> 0x%x, old stack = 0x%x:0x%x, stack = 0x%x:0x%x\n",
		smp_get_current_cpu(), t_from->id, t_to->id,
		t_from->proc->aspace, t_to->proc->aspace,
		t_from->arch_info.current_stack.ss, t_from->arch_info.current_stack.esp,
		t_to->arch_info.current_stack.ss, t_to->arch_info.current_stack.esp);
#endif
#if 0
	for(i=0; i<11; i++)
		dprintf("*esp[%d] (0x%x) = 0x%x\n", i, ((unsigned int *)new_at->esp + i), *((unsigned int *)new_at->esp + i));
#endif
	x86_64_set_kstack(t_to->kernel_stack_base + KSTACK_SIZE);

#if 0
{
	int a = *(int *)(t_to->kernel_stack_base + KSTACK_SIZE - 4);
}
#endif

	if(t_from->proc->aspace_id >= 0 && t_to->proc->aspace_id >= 0) {
		// they are both uspace threads
		if(t_from->proc->aspace_id == t_to->proc->aspace_id) {
			// don't change the pgdir, same address space
			new_pgdir = 0;
		} else {
			// switching to a new address space
			new_pgdir = vm_translation_map_get_pgdir(&t_to->proc->aspace->translation_map);
		}
	} else if(t_from->proc->aspace_id < 0 && t_to->proc->aspace_id < 0) {
		// they must both be kspace threads
		new_pgdir = 0;
	} else if(t_to->proc->aspace_id < 0) {
		// the one we're switching to is kspace
		new_pgdir = vm_translation_map_get_pgdir(&t_to->proc->kaspace->translation_map);
	} else {
		new_pgdir = vm_translation_map_get_pgdir(&t_to->proc->aspace->translation_map);
	}
#if 0
	dprintf("new_pgdir is 0x%x\n", new_pgdir);
#endif

#if 0
{
	int a = *(int *)(t_to->arch_info.current_stack.esp - 4);
}
#endif

	if((new_pgdir % PAGE_SIZE) != 0)
		panic("arch_thread_context_switch: bad pgdir %p\n", (void*)new_pgdir);

	x86_64_fsave_swap(t_from->arch_info.fpu_state, t_to->arch_info.fpu_state);
	x86_64_context_switch(&t_from->arch_info.sp, t_to->arch_info.sp, new_pgdir);
}
Example #9
void
smp_send_ici(int32 targetCPU, int32 message, addr_t data, addr_t data2,
	addr_t data3, void* dataPointer, uint32 flags)
{
	struct smp_msg *msg;

	TRACE(("smp_send_ici: target 0x%lx, mess 0x%lx, data 0x%lx, data2 0x%lx, "
		"data3 0x%lx, ptr %p, flags 0x%lx\n", targetCPU, message, data, data2,
		data3, dataPointer, flags));

	if (sICIEnabled) {
		int state;
		int currentCPU;

		// find_free_message leaves interrupts disabled
		state = find_free_message(&msg);

		currentCPU = smp_get_current_cpu();
		if (targetCPU == currentCPU) {
			return_free_message(msg);
			restore_interrupts(state);
			return; // nope, can't do that
		}

		// set up the message
		msg->message = message;
		msg->data = data;
		msg->data2 = data2;
		msg->data3 = data3;
		msg->data_ptr = dataPointer;
		msg->ref_count = 1;
		msg->flags = flags;
		msg->done = false;

		// stick it in the appropriate cpu's mailbox
		acquire_spinlock_nocheck(&sCPUMessageSpinlock[targetCPU]);
		msg->next = sCPUMessages[targetCPU];
		sCPUMessages[targetCPU] = msg;
		release_spinlock(&sCPUMessageSpinlock[targetCPU]);

		arch_smp_send_ici(targetCPU);

		if ((flags & SMP_MSG_FLAG_SYNC) != 0) {
			// Wait for the other cpu to finish processing it; if the message
			// is sync, the interrupt handler will ref count it to <0 after
			// removing it from the mailbox.
			while (msg->done == false) {
				process_all_pending_ici(currentCPU);
				PAUSE();
			}
			// for SYNC messages, it's our responsibility to put it
			// back into the free list
			return_free_message(msg);
		}

		restore_interrupts(state);
	}
}
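
A caller-side sketch for the function above: running a function on one specific CPU synchronously, using the SMP_MSG_CALL_FUNCTION message that Example #15 passes to the broadcast variant. The wrapper name is hypothetical, and it assumes the SMP_MSG_CALL_FUNCTION handler invokes the function pointer with the cookie and the handling CPU, as Example #15's usage implies.

static void
call_single_cpu_sync(int32 targetCPU, void (*func)(void*, int), void* cookie)
{
	if (targetCPU == smp_get_current_cpu()) {
		// smp_send_ici() refuses to target the current CPU (see above),
		// so run the function directly in that case.
		func(cookie, targetCPU);
		return;
	}

	// SMP_MSG_FLAG_SYNC makes smp_send_ici() wait until the target CPU has
	// processed the message, i.e. until func(cookie, targetCPU) has run.
	smp_send_ici(targetCPU, SMP_MSG_CALL_FUNCTION, (addr_t)cookie, 0, 0,
		(void*)func, SMP_MSG_FLAG_SYNC);
}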
Example #10
static int32
x86_spurious_interrupt(void *data)
{
	// spurious interrupt
	TRACE(("spurious interrupt on cpu %" B_PRId32 "\n", smp_get_current_cpu()));

	// spurious interrupts must not be acknowledged, as no end of interrupt is
	// expected for them - if we sent one anyway, we would lose the next
	// interrupt
	return B_HANDLED_INTERRUPT;
}
Example #11
static void
push_latency(spinlock* lock)
{
	if (!sEnableLatencyCheck)
		return;

	int32 cpu = smp_get_current_cpu();
	int32 index = (++sLatencyIndex[cpu]) % NUM_LATENCY_LOCKS;

	sLatency[cpu][index].lock = lock;
	sLatency[cpu][index].timestamp = system_time();
}
Example #12
void
acquire_spinlock(spinlock* lock)
{
#if DEBUG_SPINLOCKS
	if (are_interrupts_enabled()) {
		panic("acquire_spinlock: attempt to acquire lock %p with interrupts "
			"enabled", lock);
	}
#endif

	if (sNumCPUs > 1) {
		int currentCPU = smp_get_current_cpu();
#if B_DEBUG_SPINLOCK_CONTENTION
		while (atomic_add(&lock->lock, 1) != 0)
			process_all_pending_ici(currentCPU);
#else
		while (1) {
			uint32 count = 0;
			while (*lock != 0) {
				if (++count == SPINLOCK_DEADLOCK_COUNT) {
					panic("acquire_spinlock(): Failed to acquire spinlock %p "
						"for a long time!", lock);
					count = 0;
				}

				process_all_pending_ici(currentCPU);
				PAUSE();
			}
			if (atomic_or((int32*)lock, 1) == 0)
				break;
		}

#	if DEBUG_SPINLOCKS
		push_lock_caller(arch_debug_get_caller(), lock);
#	endif
#endif
	} else {
#if DEBUG_SPINLOCKS
		int32 oldValue;
		oldValue = atomic_or((int32*)lock, 1);
		if (oldValue != 0) {
			panic("acquire_spinlock: attempt to acquire lock %p twice on "
				"non-SMP system (last caller: %p, value %" B_PRId32 ")", lock,
				find_lock_caller(lock), oldValue);
		}

		push_lock_caller(arch_debug_get_caller(), lock);
#endif
	}
#if DEBUG_SPINLOCK_LATENCIES
	push_latency(lock);
#endif
}
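
acquire_spinlock() must be called with interrupts disabled (the DEBUG_SPINLOCKS check above panics otherwise), so a typical caller brackets the critical section the same way find_free_message() in Example #2 does. A minimal sketch with a hypothetical lock and counter, assuming the integer-style spinlock used above can simply be zero-initialized:

static spinlock sCounterLock = 0;
static int32 sCounter = 0;

static void
increment_counter()
{
	cpu_status state = disable_interrupts();
		// spinlocks may only be held with interrupts disabled

	acquire_spinlock(&sCounterLock);
	sCounter++;
	release_spinlock(&sCounterLock);

	restore_interrupts(state);
}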
Example #13
/*!	Notifies the profiler thread when the profiling buffer is full enough.
	The caller must hold fLock.
*/
inline void
SystemProfiler::_MaybeNotifyProfilerThreadLocked()
{
	// If the buffer is full enough, notify the profiler.
	if (fWaitingProfilerThread != NULL && fBufferSize > fBufferCapacity / 2) {
		int cpu = smp_get_current_cpu();
		fReentered[cpu] = true;

		SpinLocker _(fWaitingProfilerThread->scheduler_lock);
		thread_unblock_locked(fWaitingProfilerThread, B_OK);

		fWaitingProfilerThread = NULL;
		fReentered[cpu] = false;
	}
}
Example #14
static void
test_latency(spinlock* lock)
{
	if (!sEnableLatencyCheck)
		return;

	int32 cpu = smp_get_current_cpu();

	for (int32 i = 0; i < NUM_LATENCY_LOCKS; i++) {
		if (sLatency[cpu][i].lock == lock) {
			bigtime_t diff = system_time() - sLatency[cpu][i].timestamp;
			if (diff > DEBUG_LATENCY && diff < 500000) {
				panic("spinlock %p were held for %lld usecs (%d allowed)\n",
					lock, diff, DEBUG_LATENCY);
			}

			sLatency[cpu][i].lock = NULL;
		}
	}
}
Example #15
void
call_all_cpus_sync(void (*func)(void*, int), void* cookie)
{
	cpu_status state = disable_interrupts();
	
	// if inter-CPU communication is not yet enabled, use the early mechanism
	if (!sICIEnabled) {
		call_all_cpus_early(func, cookie);
		restore_interrupts(state);
		return;
	}

	if (smp_get_num_cpus() > 1) {
		smp_send_broadcast_ici(SMP_MSG_CALL_FUNCTION, (addr_t)cookie,
			0, 0, (void*)func, SMP_MSG_FLAG_SYNC);
	}

	// we need to call this function ourselves as well
	func(cookie, smp_get_current_cpu());

	restore_interrupts(state);
}
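
A usage sketch for the function above: the callback receives the cookie and the index of the CPU it runs on, and call_all_cpus_sync() only returns once every CPU, including the calling one, has executed it. The callback name and body here are hypothetical:

static void
dump_cpu_number(void* cookie, int cpu)
{
	dprintf("running on CPU %d (cookie %p)\n", cpu, cookie);
}

// somewhere with interrupts enabled:
//	call_all_cpus_sync(&dump_cpu_number, NULL);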
Example #16
void
SystemProfiler::ThreadRemovedFromRunQueue(Thread* thread)
{
	int cpu = smp_get_current_cpu();

	InterruptsSpinLocker locker(fLock, false, !fReentered[cpu]);
		// When re-entering, we already hold the lock.

	system_profiler_thread_removed_from_run_queue* event
		= (system_profiler_thread_removed_from_run_queue*)
			_AllocateBuffer(
				sizeof(system_profiler_thread_removed_from_run_queue),
				B_SYSTEM_PROFILER_THREAD_REMOVED_FROM_RUN_QUEUE, cpu, 0);
	if (event == NULL)
		return;

	event->time = system_time_nsecs();
	event->thread = thread->id;

	fHeader->size = fBufferSize;

	// unblock the profiler thread, if necessary
	_MaybeNotifyProfilerThreadLocked();
}
Example #17
static inline depot_cpu_store*
object_depot_cpu(object_depot* depot)
{
	return &depot->stores[smp_get_current_cpu()];
}
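
object_depot_cpu() uses the same per-CPU array idiom as push_latency() and test_latency() in Examples #11 and #14: smp_get_current_cpu() selects a slot that no other CPU touches, so no lock is needed as long as the caller cannot migrate, for example because interrupts are disabled or the thread is pinned as in Example #1. A minimal sketch with hypothetical names, assuming SMP_MAX_CPUS bounds the CPU count:

static int64 sEventCounts[SMP_MAX_CPUS];

static inline void
count_event()
{
	// Lock-free only while the caller stays on this CPU.
	sEventCounts[smp_get_current_cpu()]++;
}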
Example #18
void
x86_page_fault_exception(struct iframe* frame)
{
	Thread* thread = thread_get_current_thread();
	addr_t cr2 = x86_read_cr2();
	addr_t newip;

	if (debug_debugger_running()) {
		// If this CPU or this thread has a fault handler, we're allowed to be
		// here.
		if (thread != NULL) {
			cpu_ent* cpu = &gCPU[smp_get_current_cpu()];
			if (cpu->fault_handler != 0) {
				debug_set_page_fault_info(cr2, frame->ip,
					(frame->error_code & 0x2) != 0
						? DEBUG_PAGE_FAULT_WRITE : 0);
				frame->ip = cpu->fault_handler;
				frame->bp = cpu->fault_handler_stack_pointer;
				return;
			}

			if (thread->fault_handler != 0) {
				kprintf("ERROR: thread::fault_handler used in kernel "
					"debugger!\n");
				debug_set_page_fault_info(cr2, frame->ip,
					(frame->error_code & 0x2) != 0
						? DEBUG_PAGE_FAULT_WRITE : 0);
				frame->ip = reinterpret_cast<uintptr_t>(thread->fault_handler);
				return;
			}
		}

		// otherwise, not really
		panic("page fault in debugger without fault handler! Touching "
			"address %p from ip %p\n", (void*)cr2, (void*)frame->ip);
		return;
	} else if ((frame->flags & 0x200) == 0) {
		// interrupts disabled

		// If a page fault handler is installed, we're allowed to be here.
		// TODO: Now we are generally allowing user_memcpy() with interrupts
		// disabled, which in most cases is a bug. We should add some thread
		// flag allowing to explicitly indicate that this handling is desired.
		if (thread != NULL && thread->fault_handler != 0) {
			uintptr_t handler
				= reinterpret_cast<uintptr_t>(thread->fault_handler);
			if (frame->ip != handler) {
				frame->ip = handler;
				return;
			}

			// The fault happened at the fault handler address. This is a
			// certain infinite loop.
			panic("page fault, interrupts disabled, fault handler loop. "
				"Touching address %p from ip %p\n", (void*)cr2,
				(void*)frame->ip);
		}

		// If we are not running the kernel startup the page fault was not
		// allowed to happen and we must panic.
		panic("page fault, but interrupts were disabled. Touching address "
			"%p from ip %p\n", (void*)cr2, (void*)frame->ip);
		return;
	} else if (thread != NULL && thread->page_faults_allowed < 1) {
		panic("page fault not allowed at this place. Touching address "
			"%p from ip %p\n", (void*)cr2, (void*)frame->ip);
		return;
	}

	enable_interrupts();

	vm_page_fault(cr2, frame->ip,
		(frame->error_code & 0x2) != 0,		// write access
		(frame->error_code & 0x10) != 0,	// instruction fetch
		(frame->error_code & 0x4) != 0,		// userland
		&newip);
	if (newip != 0) {
		// the page fault handler wants us to modify the iframe so that the
		// cpu returns to this ip
		frame->ip = newip;
	}
}
Example #19
RTDECL(RTCPUID) RTMpCpuId(void)
{
    return smp_get_current_cpu();
}
Example #20
extern "C" void
arch_arm_data_abort(struct iframe *frame)
{
	Thread *thread = thread_get_current_thread();
	bool isUser = (frame->spsr & 0x1f) == 0x10;
	addr_t far = arm_get_far();
	bool isWrite = true;
	addr_t newip = 0;

#ifdef TRACE_ARCH_INT
	print_iframe("Data Abort", frame);
	dprintf("FAR: %08lx, thread: %s\n", far, thread->name);
#endif

	IFrameScope scope(frame);

	if (debug_debugger_running()) {
		// If this CPU or this thread has a fault handler, we're allowed to be
		// here.
		if (thread != NULL) {
			cpu_ent* cpu = &gCPU[smp_get_current_cpu()];

			if (cpu->fault_handler != 0) {
				debug_set_page_fault_info(far, frame->pc,
					isWrite ? DEBUG_PAGE_FAULT_WRITE : 0);
				frame->svc_sp = cpu->fault_handler_stack_pointer;
				frame->pc = cpu->fault_handler;
				return;
			}

			if (thread->fault_handler != 0) {
				kprintf("ERROR: thread::fault_handler used in kernel "
					"debugger!\n");
				debug_set_page_fault_info(far, frame->pc,
						isWrite ? DEBUG_PAGE_FAULT_WRITE : 0);
				frame->pc = thread->fault_handler;
				return;
			}
		}

		// otherwise, not really
		panic("page fault in debugger without fault handler! Touching "
			"address %p from pc %p\n", (void *)far, (void *)frame->pc);
		return;
	} else if ((frame->spsr & (1 << 7)) != 0) {
		// interrupts disabled

		// If a page fault handler is installed, we're allowed to be here.
		// TODO: Now we are generally allowing user_memcpy() with interrupts
		// disabled, which in most cases is a bug. We should add some thread
		// flag allowing to explicitly indicate that this handling is desired.
		if (thread && thread->fault_handler != 0) {
			if (frame->pc != thread->fault_handler) {
				frame->pc = thread->fault_handler;
				return;
			}

			// The fault happened at the fault handler address. This is a
			// certain infinite loop.
			panic("page fault, interrupts disabled, fault handler loop. "
				"Touching address %p from pc %p\n", (void*)far,
				(void*)frame->pc);
		}

		// If we are not running the kernel startup the page fault was not
		// allowed to happen and we must panic.
		panic("page fault, but interrupts were disabled. Touching address "
			"%p from pc %p\n", (void *)far, (void *)frame->pc);
		return;
	} else if (thread != NULL && thread->page_faults_allowed < 1) {
		panic("page fault not allowed at this place. Touching address "
			"%p from pc %p\n", (void *)far, (void *)frame->pc);
		return;
	}

	enable_interrupts();

	vm_page_fault(far, frame->pc, isWrite, false, isUser, &newip);

	if (newip != 0) {
		// the page fault handler wants us to modify the iframe so that the
		// cpu returns to this ip
		frame->pc = newip;
	}
}
Example #21
void
smp_send_broadcast_ici(int32 message, addr_t data, addr_t data2, addr_t data3,
	void *dataPointer, uint32 flags)
{
	struct smp_msg *msg;

	TRACE(("smp_send_broadcast_ici: cpu %ld mess 0x%lx, data 0x%lx, data2 "
		"0x%lx, data3 0x%lx, ptr %p, flags 0x%lx\n", smp_get_current_cpu(),
		message, data, data2, data3, dataPointer, flags));

	if (sICIEnabled) {
		int state;
		int currentCPU;

		// find_free_message leaves interrupts disabled
		state = find_free_message(&msg);

		currentCPU = smp_get_current_cpu();

		msg->message = message;
		msg->data = data;
		msg->data2 = data2;
		msg->data3 = data3;
		msg->data_ptr = dataPointer;
		msg->ref_count = sNumCPUs - 1;
		msg->flags = flags;
		msg->proc_bitmap = SET_BIT(0, currentCPU);
		msg->done = false;

		TRACE(("smp_send_broadcast_ici%d: inserting msg %p into broadcast "
			"mbox\n", currentCPU, msg));

		// stick it in the appropriate cpu's mailbox
		acquire_spinlock_nocheck(&sBroadcastMessageSpinlock);
		msg->next = sBroadcastMessages;
		sBroadcastMessages = msg;
		release_spinlock(&sBroadcastMessageSpinlock);

		arch_smp_send_broadcast_ici();

		TRACE(("smp_send_broadcast_ici: sent interrupt\n"));

		if ((flags & SMP_MSG_FLAG_SYNC) != 0) {
			// Wait for the other cpus to finish processing it; if the message
			// is sync, the interrupt handler will ref count it to <0 after
			// removing it from the mailbox.
			TRACE(("smp_send_broadcast_ici: waiting for ack\n"));

			while (msg->done == false) {
				process_all_pending_ici(currentCPU);
				PAUSE();
			}

			TRACE(("smp_send_broadcast_ici: returning message to free list\n"));

			// for SYNC messages, it's our responsibility to put it
			// back into the free list
			return_free_message(msg);
		}

		restore_interrupts(state);
	}

	TRACE(("smp_send_broadcast_ici: done\n"));
}
Example #22
void
smp_send_multicast_ici(cpu_mask_t cpuMask, int32 message, addr_t data,
	addr_t data2, addr_t data3, void *dataPointer, uint32 flags)
{
	if (!sICIEnabled)
		return;

	int currentCPU = smp_get_current_cpu();
	cpuMask &= ~((cpu_mask_t)1 << currentCPU)
		& (((cpu_mask_t)1 << sNumCPUs) - 1);
	if (cpuMask == 0) {
		panic("smp_send_multicast_ici(): 0 CPU mask");
		return;
	}

	// count target CPUs
	int32 targetCPUs = 0;
	for (int32 i = 0; i < sNumCPUs; i++) {
		if ((cpuMask & (cpu_mask_t)1 << i) != 0)
			targetCPUs++;
	}

	// find_free_message leaves interrupts disabled
	struct smp_msg *msg;
	int state = find_free_message(&msg);

	msg->message = message;
	msg->data = data;
	msg->data2 = data2;
	msg->data3 = data3;
	msg->data_ptr = dataPointer;
	msg->ref_count = targetCPUs;
	msg->flags = flags;
	msg->proc_bitmap = ~cpuMask;
	msg->done = false;

	// stick it in the broadcast mailbox
	acquire_spinlock_nocheck(&sBroadcastMessageSpinlock);
	msg->next = sBroadcastMessages;
	sBroadcastMessages = msg;
	release_spinlock(&sBroadcastMessageSpinlock);

	arch_smp_send_broadcast_ici();
		// TODO: Introduce a call that only bothers the target CPUs!

	if ((flags & SMP_MSG_FLAG_SYNC) != 0) {
		// Wait for the other cpus to finish processing it; if the message is
		// sync, the interrupt handler will ref count it to <0 after removing
		// it from the mailbox.
		while (msg->done == false) {
			process_all_pending_ici(currentCPU);
			PAUSE();
		}

		// for SYNC messages, it's our responsibility to put it
		// back into the free list
		return_free_message(msg);
	}

	restore_interrupts(state);
}
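
A caller-side sketch for the multicast variant, mirroring what M68KVMTranslationMap::Flush() in Example #1 does with CPUSet: build a mask of every CPU except the current one and send a synchronous page-list invalidation. The helper name and its parameters are hypothetical.

static void
invalidate_on_other_cpus(addr_t* pages, int32 pageCount)
{
	cpu_mask_t mask = (((cpu_mask_t)1 << smp_get_num_cpus()) - 1)
		& ~((cpu_mask_t)1 << smp_get_current_cpu());

	// smp_send_multicast_ici() panics on an empty mask, so skip the call
	// on single-CPU systems.
	if (mask != 0) {
		smp_send_multicast_ici(mask, SMP_MSG_INVALIDATE_PAGE_LIST,
			(addr_t)pages, pageCount, 0, NULL, SMP_MSG_FLAG_SYNC);
	}
}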