Example #1
int kt_kernel_idle_task(void)
{
	tm_thread_raise_flag(current_thread, THREAD_KERNEL);
	kthread_create(&kthread_pager, "[kpager]", 0, __KT_pager, 0);
	strncpy((char *)current_process->command, "[kernel]", 128);
	/* wait until init has successfully executed, and then remap. */
	while(!(kernel_state_flags & KSF_HAVEEXECED)) {
		tm_schedule();
	}
	printk(1, "[kernel]: remapping lower memory with protection flags...\n");
	cpu_interrupt_set(0);
	for(addr_t addr = MEMMAP_KERNEL_START; addr < MEMMAP_KERNEL_END; addr += PAGE_SIZE)
	{
		mm_virtual_changeattr(addr, PAGE_PRESENT | PAGE_WRITE, PAGE_SIZE);
	}
	cpu_interrupt_set(1);
	/* Now enter the main idle loop, waiting to do periodic cleanup */
	printk(0, "[idle]: entering background loop\n");
	for(;;) {
		assert(!current_thread->held_locks);
		/* drain this CPU's deferred work queue, or yield if there is nothing to do */
		if(__current_cpu->work.count > 0)
			workqueue_dowork(&__current_cpu->work);
		else
			tm_schedule();
		/* reap children; only inspect the status if a child was actually returned */
		int status = 0;
		int pid = sys_waitpid(-1, &status, WNOHANG);
		if(pid > 0 && WIFSTOPPED(status)) {
			sys_kill(pid, SIGKILL);
		}
	}
}
Example #2
/* This runs while the interrupt has not yet returned, but it is now reasonably safe to
 * do work: the interrupt has been acknowledged and interrupt_level has already been
 * decremented. */
void cpu_interrupt_post_handling(void)
{
	if(!current_thread->interrupt_level && !current_thread->system) {
		if(current_thread->flags & THREAD_TICKER_DOWORK) {
			ticker_dowork(&__current_cpu->ticker);
		}
		if(current_thread->flags & THREAD_SCHEDULE)
			tm_schedule();
		if(current_thread->flags & THREAD_EXIT) {
			tm_thread_do_exit();
			tm_schedule();
		}
	}
}
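
A note on the pattern shared by Examples #1, #2, and #5: work is requested by raising a per-thread flag (tm_thread_raise_flag) and acted on later, at a point where it is safe to do so, such as cpu_interrupt_post_handling above. The snippet below is not from this code base; it is a minimal, self-contained sketch of that raise/test-and-clear idea using C11 atomics, with made-up flag names and printf standing in for the real scheduler hooks.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* hypothetical flag bits, standing in for THREAD_SCHEDULE and friends */
#define FLAG_SCHEDULE (1u << 0)
#define FLAG_EXIT     (1u << 1)

struct thread {
	atomic_uint flags;
};

/* raise a flag: set the bit atomically, from any context */
static void thread_raise_flag(struct thread *t, unsigned flag)
{
	atomic_fetch_or(&t->flags, flag);
}

/* test-and-clear: returns true exactly once per raise, even with concurrent raisers */
static bool thread_take_flag(struct thread *t, unsigned flag)
{
	return atomic_fetch_and(&t->flags, ~flag) & flag;
}

int main(void)
{
	struct thread t = { .flags = 0 };
	thread_raise_flag(&t, FLAG_SCHEDULE);

	/* the "post-handling" site: act on whatever was requested in the meantime */
	if(thread_take_flag(&t, FLAG_SCHEDULE))
		printf("would call tm_schedule() here\n");
	if(thread_take_flag(&t, FLAG_EXIT))
		printf("would call tm_thread_do_exit() here\n");
	return 0;
}
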
Example #3
void cpu_handle_ipi_reschedule(struct registers *regs)
{
	// We don't allow a reschedule request if this cpu has interrupts disabled.
	if(!cpu_interrupt_get_flag())
		return;
	tm_schedule();
}
Example #4
void tm_thread_enter_system(int sys)
{
	// Check for PTRACE event.
	if((current_thread->flags & THREAD_PTRACED) && (current_thread->tracee_flags & TRACEE_STOPON_SYSCALL)) {
		current_thread->tracee_flags &= ~TRACEE_STOPON_SYSCALL;
		current_thread->orig_syscall = sys;
		current_thread->syscall_return = 0;
		tm_signal_send_thread(current_thread, SIGTRAP);
		tm_schedule();
	}

	/* record the current syscall number; syscall 0 is stored as -1 so that
	 * 'system' is nonzero whenever the thread is inside a syscall */
	current_thread->system = (!sys ? -1 : sys);
}
Example #5
void tm_thread_exit_system(long sys, long ret)
{
	current_thread->system = 0;
	// Check for PTRACE event.
	if((current_thread->flags & THREAD_PTRACED) && (current_thread->tracee_flags & TRACEE_STOPON_SYSCALL)) {
		current_thread->tracee_flags &= ~TRACEE_STOPON_SYSCALL;
		current_thread->orig_syscall = sys;
		current_thread->syscall_return = ret;
		tm_signal_send_thread(current_thread, SIGTRAP);
		tm_schedule();
	}

	// If we have a pending signal, we've been ignoring it until now because we were
	// inside a syscall. Set the schedule flag so we can handle it now.
	if(tm_thread_got_signal(current_thread))
		tm_thread_raise_flag(current_thread, THREAD_SCHEDULE);
}
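
Taken together, Examples #4 and #5 bracket every system call: the entry hook records which syscall the thread is in (and gives a tracer its syscall-entry stop), and the exit hook records the return value, clears the marker, and re-checks for pending signals. The examples do not show the dispatcher that calls them; the sketch below is an assumption about that call sequence, with syscall_do() as a made-up stand-in for whatever performs the actual call.

/* hypothetical dispatcher showing how the two hooks are expected to pair up;
 * the hook prototypes are copied from Examples #4 and #5, syscall_do() is invented */
void tm_thread_enter_system(int sys);
void tm_thread_exit_system(long sys, long ret);
long syscall_do(int num, long a1, long a2, long a3);

long syscall_dispatch(int num, long a1, long a2, long a3)
{
	tm_thread_enter_system(num);            /* mark "inside syscall num", possible ptrace entry stop */
	long ret = syscall_do(num, a1, a2, a3); /* the actual syscall implementation */
	tm_thread_exit_system(num, ret);        /* clear the marker, possible ptrace exit stop, recheck signals */
	return ret;
}
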
Example #6
void __mutex_acquire(struct mutex *m, char *file, int line)
{
	assert(m->magic == MUTEX_MAGIC);

	if(unlikely(kernel_state_flags & KSF_DEBUGGING))
		return;
	if(unlikely(current_thread && current_thread->interrupt_level))
		panic(PANIC_NOSYNC, "cannot lock a mutex within interrupt context (%s:%d)", file, line);
	if(unlikely(kernel_state_flags & KSF_SHUTDOWN)) return;
#if MUTEX_DEBUG
	int timeout = 8000;
#endif
	if(unlikely(current_thread && __current_cpu->preempt_disable > 0))
		panic(0, "tried to lock schedulable mutex with preempt off");

	if(likely(current_thread != NULL))
		current_thread->held_locks++;

	/* wait until we can set bit 0; once this is done, we have the lock */
	while(atomic_exchange(&m->lock, true)) {
		if(likely(current_thread != NULL)) {
			/* are we re-locking ourselves? */
			if(m->owner == current_thread)
				panic(0, "tried to relock mutex (%s:%d)",
						file, line);
			/* we can use __current_cpu here, because we're testing if we're the idle
		 	 * thread, and the idle thread never migrates. */
			if(current_thread != __current_cpu->idle_thread) {
				tm_thread_block_confirm(&m->blocklist, THREADSTATE_UNINTERRUPTIBLE,
						__confirm, m);
			} else {
				tm_schedule();
			}
		}
#if MUTEX_DEBUG
		if(--timeout == 0) {
			panic(0, "mutex timeout from %s:%d (owned by %d: %s:%d)\n", file, line, m->pid, m->owner_file, m->owner_line);
		}
#endif
	}
	m->owner = current_thread;
	m->owner_file = file;
	m->owner_line = line;
}
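
The heart of __mutex_acquire is a test-and-set loop: atomic_exchange(&m->lock, true) returns the previous value, so the loop exits only for the caller that flipped the lock from false to true; everyone else either blocks on the mutex's blocklist or, if it is the idle thread, just yields via tm_schedule(). The snippet below is a userspace analogue of that loop, not the kernel's mutex: it uses C11 atomics and sched_yield() in place of tm_thread_block_confirm()/tm_schedule(), and leaves out all of the debugging and ownership checks.

#include <stdatomic.h>
#include <stdbool.h>
#include <sched.h>

struct toy_mutex {
	atomic_bool lock;
};

/* acquire: spin on test-and-set, giving up the CPU while the lock is contended */
static void toy_mutex_acquire(struct toy_mutex *m)
{
	while(atomic_exchange(&m->lock, true)) {
		/* the kernel version blocks on m->blocklist here; userspace can
		 * only yield and try again */
		sched_yield();
	}
}

/* release: a plain atomic store is enough, since only the owner calls this */
static void toy_mutex_release(struct toy_mutex *m)
{
	atomic_store(&m->lock, false);
}

int main(void)
{
	struct toy_mutex m = { .lock = false };
	toy_mutex_acquire(&m);
	/* ... critical section ... */
	toy_mutex_release(&m);
	return 0;
}
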
Example #7
int ahci_port_dma_data_transfer(struct hba_memory *abar, struct hba_port *port, struct ahci_device *dev, int slot, int write, addr_t virt_buffer, int sectors, uint64_t lba)
{
	int timeout;
	int fis_len = sizeof(struct fis_reg_host_to_device) / 4;
	int ne = ahci_write_prdt(abar, port, dev,
			slot, 0, ATA_SECTOR_SIZE * sectors, virt_buffer);
	struct hba_command_header *h = ahci_initialize_command_header(abar,
			port, dev, slot, write, 0, ne, fis_len);
	struct fis_reg_host_to_device *fis = ahci_initialize_fis_host_to_device(abar,
			port, dev, slot, 1, write ? ATA_CMD_WRITE_DMA_EX : ATA_CMD_READ_DMA_EX);
	fis->device = 1 << 6; /* device register bit 6: select LBA addressing mode */
	/* WARNING: assumes little-endian */
	fis->count_l = sectors & 0xFF;
	fis->count_h = (sectors >> 8) & 0xFF;
	
	fis->lba0 = (unsigned char)( lba        & 0xFF);
	fis->lba1 = (unsigned char)((lba >> 8)  & 0xFF);
	fis->lba2 = (unsigned char)((lba >> 16) & 0xFF);
	fis->lba3 = (unsigned char)((lba >> 24) & 0xFF);
	fis->lba4 = (unsigned char)((lba >> 32) & 0xFF);
	fis->lba5 = (unsigned char)((lba >> 40) & 0xFF);
	port->sata_error = ~0; /* SERR is write-1-to-clear: writing all ones clears every error bit */
	timeout = ATA_TFD_TIMEOUT;
	while ((port->task_file_data & (ATA_DEV_BUSY | ATA_DEV_DRQ)) && --timeout)
	{
		tm_schedule();
	}
	if(!timeout) goto port_hung;
	
	port->sata_error = ~0;
	ahci_send_command(port, slot);
	timeout = ATA_TFD_TIMEOUT;
	while ((port->task_file_data & (ATA_DEV_BUSY | ATA_DEV_DRQ)) && --timeout)
	{
		tm_schedule();
	}
	if(!timeout) goto port_hung;
	
	timeout = AHCI_CMD_TIMEOUT;
	while(--timeout)
	{
		if(!((port->sata_active | port->command_issue) & (1 << slot)))
			break;
		tm_schedule();
	}
	if(!timeout) goto port_hung;
	if(port->sata_error)
	{
		printk(KERN_DEBUG, "[ahci]: device %d: ahci error\n", dev->idx);
		goto error;
	}
	if(port->task_file_data & ATA_DEV_ERR)
	{
		printk(KERN_DEBUG, "[ahci]: device %d: task file data error\n", dev->idx);
		goto error;
	}
	return 1;
port_hung:
	printk(KERN_DEBUG, "[ahci]: device %d: port hung\n", dev->idx);
error:
	printk(KERN_DEBUG, "[ahci]: device %d: tfd=%x, serr=%x\n",
			dev->idx, port->task_file_data, port->sata_error);
	ahci_reset_device(abar, port, dev);
	return 0;
}
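
The FIS setup in Example #7 splits the 48-bit LBA across six byte-wide fields (lba0..lba5) and the sector count across a low/high byte pair, which is why the shifts step by 8 bits. The standalone snippet below only demonstrates that split and its reassembly; the struct is a stand-in for the relevant fis_reg_host_to_device fields, not the driver's actual definition.

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

struct lba_fields {
	uint8_t lba0, lba1, lba2, lba3, lba4, lba5; /* 48-bit LBA, least significant byte first */
	uint8_t count_l, count_h;                   /* 16-bit sector count */
};

static void split(struct lba_fields *f, uint64_t lba, uint16_t sectors)
{
	f->lba0 = (uint8_t)( lba        & 0xFF);
	f->lba1 = (uint8_t)((lba >> 8)  & 0xFF);
	f->lba2 = (uint8_t)((lba >> 16) & 0xFF);
	f->lba3 = (uint8_t)((lba >> 24) & 0xFF);
	f->lba4 = (uint8_t)((lba >> 32) & 0xFF);
	f->lba5 = (uint8_t)((lba >> 40) & 0xFF);
	f->count_l = sectors & 0xFF;
	f->count_h = (sectors >> 8) & 0xFF;
}

static uint64_t join(const struct lba_fields *f)
{
	return  (uint64_t)f->lba0         | ((uint64_t)f->lba1 << 8)  |
		((uint64_t)f->lba2 << 16) | ((uint64_t)f->lba3 << 24) |
		((uint64_t)f->lba4 << 32) | ((uint64_t)f->lba5 << 40);
}

int main(void)
{
	struct lba_fields f;
	uint64_t lba = 0x123456789ABCULL; /* any value that fits in 48 bits */
	split(&f, lba, 8);
	assert(join(&f) == lba);
	printf("lba ok, sectors = %u\n", (unsigned)(f.count_l | (f.count_h << 8)));
	return 0;
}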