// Allocator stress process: repeatedly forks, allocates heap pages, and
// exits at random, to exercise the kernel's page allocation and fork paths.
void process_main(void) {
    // Stagger startup: yield until a random tick, then fork; the child
    // breaks out of the loop and runs the allocation workload below.
    while (1) {
        if (rand() % ALLOC_SLOWDOWN == 0) {
            if (sys_fork() == 0) {
                break;
            }
        } else {
            sys_yield();
        }
    }

    pid_t p = sys_getpid();
    srand(p);  // seed per-pid so sibling processes draw different sequences

    // The heap starts on the page right after the 'end' symbol,
    // whose address is the first address not allocated to process code
    // or data.
    heap_top = ROUNDUP((uint8_t*) end, PAGESIZE);

    // The bottom of the stack is the first address on the current
    // stack page (this process never needs more than one stack page).
    stack_bottom = ROUNDDOWN((uint8_t*) read_rsp() - 1, PAGESIZE);

    // Allocate heap pages until (1) hit the stack (out of address space)
    // or (2) allocation fails (out of physical memory).
    while (1) {
        int x = rand() % (8 * ALLOC_SLOWDOWN);
        if (x < 8 * p) {
            // Allocate one page; probability scales with pid, so
            // higher-numbered processes allocate more aggressively.
            if (heap_top == stack_bottom || sys_page_alloc(heap_top) < 0) {
                break;
            }
            *heap_top = p; /* check we have write access to new page */
            heap_top += PAGESIZE;
            if (console[CPOS(24, 0)]) { /* clear "Out of physical memory" msg */
                console_printf(CPOS(24, 0), 0, "\n");
            }
        } else if (x == 8 * p) {
            // Occasionally fork; the child re-reads its pid and
            // continues the same loop with its own allocation rate.
            if (sys_fork() == 0) {
                p = sys_getpid();
            }
        } else if (x == 8 * p + 1) {
            sys_exit();
        } else {
            sys_yield();
        }
    }

    // After running out of memory: linger, yielding, and exit at a
    // random time so freed pages become available to siblings.
    while (1) {
        if (rand() % (2 * ALLOC_SLOWDOWN) == 0) {
            sys_exit();
        } else {
            sys_yield();
        }
    }
}
/** * Return a copy of the current task. */ struct task_struct *fork_curr_task(void) { struct task_struct *task; uint64_t *kstack, *curr_kstack; int i; kstack = (uint64_t *)get_free_page(0); if(!kstack) return NULL; task = kmalloc(sizeof(*task)); if(!task) goto out_stack; /* Half the remaining timeslice (split between parent and child) */ curr_task->timeslice >>= 1; memcpy(task, curr_task, sizeof(*task)); /* Exact copy of parent */ /* deep copy the current mm */ task->mm = mm_deep_copy(); if(task->mm == NULL) goto out_task; /* Copy the curr_task's kstack */ curr_kstack = (uint64_t *)ALIGN_DOWN(read_rsp(), PAGE_SIZE); memcpy(kstack, curr_kstack, PAGE_SIZE); task->kernel_rsp = (uint64_t)&kstack[510]; /* new kernel stack */ task->pid = get_next_pid(); /* new pid */ task->parent = curr_task; /* new parent */ task->chld = task->sib = NULL; /* no children/siblings yet */ task->next_task = task->prev_task = task->next_rq = NULL; /* Increment reference counts on any open files */ for(i = 0; i < TASK_FILES_MAX; i++) { struct file *fp = task->files[i]; if(fp) { fp->f_count++; } } /* Add this new child to the parent */ add_child(curr_task, task); /* TODO: Here we steal our parent's foreground status */ if(curr_task->pid > 2) curr_task->foreground = 0;/* change to 1; to let all tasks read */ task_add_new(task); /* add to run queue and task list */ return task; out_task: kfree(task); out_stack: free_page((uint64_t)kstack); return NULL; }
/**
 * Log the five fields of a pending iretq frame (RIP, CS, RFLAGS, RSP, SS)
 * together with the kernel stack pointer at the time of the call.
 */
void debug_iretq(uint64_t fault_rip, uint64_t cs, uint64_t rflags,
                 uint64_t rsp, uint64_t ss)
{
    void *frame_rsp    = (void *)rsp;
    void *frame_rip    = (void *)fault_rip;
    void *frame_ss     = (void *)ss;
    void *frame_rflags = (void *)rflags;
    void *frame_cs     = (void *)cs;

    debug("CURR_RSP:%p\nIRETQ: RSP:%p RIP:%p SS:%p RFLAGS:%p CS:%p\n",
          read_rsp(), frame_rsp, frame_rip, frame_ss, frame_rflags,
          frame_cs);
}