Example #1
/*! Transmit physically contiguous data */
static inline status_t
transfer_PIO_physcont(ide_device_info *device, addr_t physicalAddress,
	int length, bool write, int *transferred)
{
	// we must split the chunk into B_PAGE_SIZE blocks, as we can map only
	// one page into the address space at a time
	while (length > 0) {
		addr_t virtualAddress;
		void* handle;
		int page_left, cur_len;
		status_t err;
		struct thread* thread = thread_get_current_thread();

		SHOW_FLOW(4, "Transmitting to/from physical address %lx, %d bytes left",
			physicalAddress, length);

		thread_pin_to_current_cpu(thread);
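		// the mapping returned by vm_get_physical_page_current_cpu() is only
		// valid on this CPU, so the thread must not migrate until the matching
		// vm_put_physical_page_current_cpu() below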
		if (vm_get_physical_page_current_cpu(physicalAddress, &virtualAddress,
				&handle) != B_OK) {
			thread_unpin_from_current_cpu(thread);
			// ouch: this should never ever happen
			set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);
			return B_ERROR;
		}

		// if the chunk starts in the middle of a page, we have even less than
		// a page left
		page_left = B_PAGE_SIZE - physicalAddress % B_PAGE_SIZE;

		SHOW_FLOW(4, "page_left=%d", page_left);

		cur_len = min(page_left, length);

		SHOW_FLOW(4, "cur_len=%d", cur_len);

		err = transfer_PIO_virtcont(device, (uint8 *)virtualAddress,
			cur_len, write, transferred);

		vm_put_physical_page_current_cpu(virtualAddress, handle);
		thread_unpin_from_current_cpu(thread);

		if (err != B_OK)
			return err;

		length -= cur_len;
		physicalAddress += cur_len;
	}

	return B_OK;
}
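
For context, such a routine is usually driven from a scatter/gather loop that hands each physically contiguous run of a request to transfer_PIO_physcont(). The caller below is a sketch only: the name transfer_PIO_sg is hypothetical, the physical_entry field types are assumed (they vary between Haiku versions, hence the casts), and it assumes the callees accumulate into *transferred.

/* Hypothetical caller (sketch): transmit every physically contiguous
   entry of a scatter/gather list via transfer_PIO_physcont() above. */
static status_t
transfer_PIO_sg(ide_device_info *device, const physical_entry *sgList,
	int sgCount, bool write, int *transferred)
{
	int i;

	*transferred = 0;

	for (i = 0; i < sgCount; i++) {
		status_t err = transfer_PIO_physcont(device,
			(addr_t)sgList[i].address, (int)sgList[i].size, write,
			transferred);
		if (err != B_OK)
			return err;
	}

	return B_OK;
}
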
void
M68KVMTranslationMap::Flush()
{
	if (fInvalidPagesCount <= 0)
		return;

	Thread* thread = thread_get_current_thread();
	thread_pin_to_current_cpu(thread);
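	// pinned: the local TLB invalidations below must run on the CPU whose
	// TLB we are flushing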

	if (fInvalidPagesCount > PAGE_INVALIDATE_CACHE_SIZE) {
		// invalidate all pages
		TRACE("flush_tmap: %d pages to invalidate, invalidate all\n",
			fInvalidPagesCount);

		if (fIsKernelMap) {
			arch_cpu_global_TLB_invalidate();
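			// kernel mappings are cached in every CPU's TLB; SMP_MSG_FLAG_SYNC
			// makes this call wait until all other CPUs have flushed too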
			smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVALIDATE_PAGES, 0, 0, 0,
				NULL, SMP_MSG_FLAG_SYNC);
		} else {
			cpu_status state = disable_interrupts();
			arch_cpu_user_TLB_invalidate();
			restore_interrupts(state);

			int cpu = smp_get_current_cpu();
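			// notify only the CPUs this address space is currently active on;
			// this CPU was already flushed above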
			CPUSet cpuMask = PagingStructures()->active_on_cpus;
			cpuMask.ClearBit(cpu);

			if (!cpuMask.IsEmpty()) {
				smp_send_multicast_ici(cpuMask, SMP_MSG_USER_INVALIDATE_PAGES,
					0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
			}
		}
	} else {
		TRACE("flush_tmap: %d pages to invalidate, invalidate list\n",
			fInvalidPagesCount);

		arch_cpu_invalidate_TLB_list(fInvalidPages, fInvalidPagesCount);

		if (fIsKernelMap) {
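			// kernel mappings: every CPU must drop the listed pages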
			smp_send_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_LIST,
				(addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
				SMP_MSG_FLAG_SYNC);
		} else {
			int cpu = smp_get_current_cpu();
			CPUSet cpuMask = PagingStructures()->active_on_cpus;
			cpuMask.ClearBit(cpu);

			if (!cpuMask.IsEmpty()) {
				smp_send_multicast_ici(cpuMask, SMP_MSG_INVALIDATE_PAGE_LIST,
					(addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
					SMP_MSG_FLAG_SYNC);
			}
		}
	}
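	// all other CPUs have acknowledged (SMP_MSG_FLAG_SYNC), so the pending
	// list can be reset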
	fInvalidPagesCount = 0;

	thread_unpin_from_current_cpu(thread);
}
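
Flush() only drains fInvalidPages; the list is filled elsewhere in the translation map. A minimal sketch of the usual producer side, assuming the same member names as above (Haiku's x86 translation maps follow this pattern; the exact M68K signature is an assumption):

// Sketch: typical producer for fInvalidPages. The counter is deliberately
// incremented past PAGE_INVALIDATE_CACHE_SIZE -- Flush() uses the overflow
// to decide that a full TLB flush is cheaper than a per-page list.
void
M68KVMTranslationMap::InvalidatePage(addr_t address)
{
	if (fInvalidPagesCount < PAGE_INVALIDATE_CACHE_SIZE)
		fInvalidPages[fInvalidPagesCount] = address;

	fInvalidPagesCount++;
}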