void sp_wait( void )
{
    HAS_CRITICAL_SECTION;

    ENTER_CRITICAL_SECTION();
    CURRENT_TASK()->errno  = 0;
    CURRENT_TASK()->status = TASK_WAITING;
    LEAVE_CRITICAL_SECTION();
}
void sp_wait_object( void* obj )
{
    HAS_CRITICAL_SECTION;

    ENTER_CRITICAL_SECTION();
    CURRENT_TASK()->errno    = 0;
    CURRENT_TASK()->status   = TASK_WAITING;
    CURRENT_TASK()->wait_obj = obj;
    LEAVE_CRITICAL_SECTION();
}
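//
// The wake-up side of sp_wait_object() is not shown in this listing. Below is
// a minimal sketch of what it could look like, based only on the fields and
// macros visible above (spk_tasks[], SPK_MAX_TASKS, status, wait_obj). The
// function name sp_wake_object and the TASK_READY status value are
// assumptions, not part of the original source; a real implementation would
// also have to re-queue the woken task on spk_ready_list.
//
void sp_wake_object( void* obj )   /* hypothetical helper, for illustration */
{
    sp_pid_t i;
    HAS_CRITICAL_SECTION;

    ENTER_CRITICAL_SECTION();
    for( i = 0; i < SPK_MAX_TASKS; i++ ) {
        /* wake every task that is currently blocked on this object */
        if( spk_tasks[i].task != NULL &&
            spk_tasks[i].status == TASK_WAITING &&
            spk_tasks[i].wait_obj == obj ) {
            spk_tasks[i].status   = TASK_READY;   /* assumed status value */
            spk_tasks[i].wait_obj = NULL;
        }
    }
    LEAVE_CRITICAL_SECTION();
}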
int sys_lseek(int fd, long off, int where)
{
    struct file *file;

    /* fd must be a valid index into the per-task file table */
    if (fd < 0 || fd >= NR_OPEN || !(file = (CURRENT_TASK())->file[fd]))
        return -EBADF;

    switch (where) {
    case SEEK_SET:
        if (off < 0)
            return -EINVAL;
        file->f_pos = off;
        break;
    case SEEK_CUR:
        if (file->f_pos + off < 0)
            return -EINVAL;
        file->f_pos += off;
        break;
    case SEEK_END:
        if (file->f_inode->i_size + off < 0)
            return -EINVAL;
        file->f_pos = file->f_inode->i_size + off;
        break;
    default:
        return -EINVAL;
    }
    return file->f_pos;
}
void do_signal(struct trapframe *tf)
{
    struct task *current;
    sigset_t signal;
    int signr;

    current = CURRENT_TASK();
    signal = current->sig_signal & ~current->sig_mask;
    if (!signal)
        return;

    for (signr = 0; signr < 31; signr++) {
        if (!(signal & (1 << signr)))
            continue;
        if (signr == SIGCHLD - 1)
            continue;               /* SIGCHLD is ignored by default */
        else
            do_exit(signr + 1);     /* default action: terminate the task */
    }
    current->sig_signal &= ~signal;
}
int sys_access(char *filename, int mode)
{
    struct inode *inode;
    mode_t tmp_mode = 0;
    struct task *current;

    if (!(inode = namei(filename, NULL)))
        return -EACCESS;
    if (mode == F_OK) {
        iput(inode);
        return 0;
    }

    mode &= 7;
    current = CURRENT_TASK();
    if (current->uid == inode->i_uid)
        tmp_mode |= mode << 6;
    if (current->gid == inode->i_gid)
        tmp_mode |= mode << 3;
    tmp_mode |= mode;

    if (tmp_mode & inode->i_mode) {
        iput(inode);
        return 0;
    }
    iput(inode);
    return -EACCESS;
}
//
// Interrupts are assumed to be disabled on entry.
// When this function returns, interrupts are enabled again, because
// spk_schedule_task() leaves them enabled.
//
static void spk_dispatch( task_cb* task )
{
    task_cb* prev;

    prev = CURRENT_TASK();
    CURRENT_TASK() = task;

    task->status   = TASK_ACTIVE;
    task->wait_obj = NULL;

    ENABLE_INT();
    (task->task)();
    DISABLE_INT();

    CURRENT_TASK() = prev;

    //
    // Dispatch any task that has a higher priority
    //
    spk_schedule_task();
}
void spk_init( void )
{
    sp_pid_t i;

    CURRENT_TASK() = NULL;
    spk_ready_list = NULL;

    for( i = 0; i < SPK_MAX_TASKS; i++ ) {
        spk_tasks[i].task = NULL;
        spk_tasks[i].pid  = i;
    }

    spk_in_interrupt = 1;
    spk_num_tasks = 0;
}
int sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
    struct file *file;

    /* fd must be a valid index into the per-task file table */
    if (fd >= NR_OPEN || !(file = (CURRENT_TASK())->file[fd]))
        return -EBADF;

    switch (cmd) {
    case F_DUPFD:
        return dupfd(fd, arg);
    default:
        return -EIO;
    }
}
int sys_chdir(char *path)
{
    struct inode *inode;
    struct task *current = CURRENT_TASK();

    if (!(inode = namei(path, NULL)))
        return -ENOENT;
    if (!S_ISDIR(inode->i_mode)) {
        iput(inode);
        return -ENOTDIR;
    }

    iput(current->pwd);
    current->pwd = inode;
    iunlock(inode);
    return 0;
}
int sys_write(int fd, char *buf, size_t size)
{
    struct file *file;
    struct inode *inode;
    int res;

    /* fd must be a valid index into the per-task file table */
    if (fd < 0 || fd >= NR_OPEN || !(file = (CURRENT_TASK())->file[fd]))
        return -EBADF;
    if (!(file->f_mode & O_WRITE))
        return -EBADF;

    inode = idup(file->f_inode);
    if (file->f_mode & O_APPEND)
        file->f_pos = inode->i_size;

    switch (inode->i_mode & S_IFMT) {
    case S_IFREG:
        res = write_file(inode, buf, file->f_pos, size);
        break;
    case S_IFDIR:
        res = -EISDIR;
        break;
    case S_IFCHR:
        res = write_char(inode->i_rdev, buf, file->f_pos, size);
        break;
    case S_IFBLK:
        res = write_blk(inode->i_rdev, buf, file->f_pos, size);
        break;
    case S_IFIFO:
        res = write_pipe(inode, buf, size);
        break;
    default:
        res = -EIO;
    }

    if (res > 0)
        file->f_pos += res;
    if (file->f_pos > inode->i_size)
        inode->i_size = file->f_pos;

    iput(inode);
    return res;
}
int sys_sigact(unsigned int signr, struct sigaction *newact,
               struct sigaction *oldact)
{
    struct task *current;

    /* signal numbers are 1..32; SIGKILL's action cannot be changed */
    if (signr < 1 || signr > 32 || signr == SIGKILL)
        return -EINVAL;

    current = CURRENT_TASK();
    if (oldact)
        memcpy(oldact, &current->sigtable[signr - 1],
               sizeof(struct sigaction));
    if (newact)
        memcpy(&current->sigtable[signr - 1], newact,
               sizeof(struct sigaction));
    return 0;
}
static int dupfd(int fd, unsigned int arg)
{
    struct task *current;

    if (arg >= NR_OPEN)
        return -EBADF;
    if (arg)
        sys_close(arg);

    current = CURRENT_TASK();
    /* find the lowest free slot at or above arg and duplicate fd into it */
    while (arg < NR_OPEN) {
        if (current->file[arg]) {
            arg++;
            continue;
        }
        current->file[arg] = current->file[fd];
        current->file[fd]->f_count++;
        return arg;
    }
    return -EMFILE;
}
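//
// dupfd() is the same helper that classic Unix kernels build dup() and dup2()
// on top of. A minimal sketch of such wrappers, assuming this kernel would
// expose them as sys_dup()/sys_dup2() (these names are assumptions, not part
// of the original source; fd validation is left to the caller, as sys_fcntl()
// does above):
//
int sys_dup(int fd)
{
    /* duplicate fd into the lowest free slot, searching from 0 */
    return dupfd(fd, 0);
}

int sys_dup2(int oldfd, int newfd)
{
    /*
     * dupfd() already closes a non-zero target slot before reusing it;
     * note that with newfd == 0 this sketch does not close slot 0 first.
     */
    return dupfd(oldfd, newfd);
}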
struct buffer * bread(dev_t dev, long block)
{
    struct buffer *buf = getblk(dev, block);

    if (buf == NULL)
        panic("bread: getblk returned NULL");

    if (!(buf->b_flag & B_VALID)) {
        rw_block(READ_BUF, buf);
        /*
         * Take the buffer lock without checking who currently holds it;
         * the buffer will be unlocked by the driver when the I/O completes.
         */
        irq_lock();
        while (buf->b_lock.pid)
            sleep_on(&buf->b_lock.wait);
        buf->b_lock.pid = (CURRENT_TASK())->pid;
        irq_unlock();
    }
    return buf;
}
int sys_sigmask(int how, sigset_t *set, sigset_t *oset)
{
    struct task *current;

    current = CURRENT_TASK();
    if (oset)
        *oset = current->sig_mask;

    if (set) {
        switch (how) {
        case SIG_BLOCK:
            current->sig_mask |= *set;
            //current->sig_mask &= (~(1 << (SIGKILL - 1)) | 1 << (SIGSTOP - 1));
            break;
        case SIG_UNBLOCK:
            current->sig_mask &= ~(*set);
            break;
        case SIG_SETMASK:
            current->sig_mask = *set;
        }
    }
    return current->sig_mask;
}
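//
// A natural companion to sys_sigmask() is a sigpending()-style call that
// reports which signals have been raised but are currently held back by
// sig_mask (do_signal() above only delivers sig_signal & ~sig_mask). A
// minimal sketch, using only the sig_signal/sig_mask fields shown above; the
// syscall name is an assumption, not part of the original source:
//
int sys_sigpending(sigset_t *set)
{
    struct task *current = CURRENT_TASK();

    if (set)
        *set = current->sig_signal & current->sig_mask;
    return 0;
}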
int sys_open(char *path, int flag, mode_t mode)
{
    extern int sys_mknod(char *filename, mode_t mode, dev_t dev);
    int fd;
    struct file *file;
    struct inode *inode;
    struct task *current = CURRENT_TASK();

    /* find a free slot in the per-task file table */
    for (fd = 0; fd < NR_OPEN; fd++) {
        if (!current->file[fd])
            break;
    }
    if (fd >= NR_OPEN)
        return -EINVAL;

    /* find a free entry in the global file table */
    for (file = file_table; file < file_table + NR_FILE; file++)
        if (!file->f_count)
            break;
    if (file >= file_table + NR_FILE)
        return -EINVAL;

    if (!(inode = namei(path, NULL))) {
        /* create the file only when O_CREAT was requested */
        if (!(flag & O_CREAT))
            return -ENOENT;
        if (sys_mknod(path, S_IFREG | (mode & 07777), 0) < 0)
            return -EAGAIN;
        if (!(inode = namei(path, NULL)))
            return -ENOENT;
    }
    iunlock(inode);

    file->f_count = 1;
    file->f_inode = inode;
    file->f_pos = 0;
    file->f_mode = flag;
    current->file[fd] = file;
    return fd;
}
int sys_sigwait()
{
    (CURRENT_TASK())->state = TASK_SIGWAIT;
    sched();
    return -EINTR;
}
char *
generate_elf_header(int type, int fd, char *filename)
{
    int i, n;
    char *buffer, *ptr;
    Elf64_Ehdr *elf;
    Elf64_Phdr *notes;
    Elf64_Phdr *load;
    size_t offset, len, l_offset;
    size_t data_offset;
    struct elf_prpsinfo_64 prpsinfo;
    union prstatus prstatus;
    int prstatus_len;
    ushort e_machine;
    int num_segments;
    struct node_table *nt;
    struct SNAP_info {
        ulonglong task_struct;
        ulonglong arch_data1;
        ulonglong arch_data2;
    } SNAP_info;

    num_segments = vt->numnodes;

    if (machine_type("X86_64")) {
        e_machine = EM_X86_64;
        prstatus_len = sizeof(prstatus.x86_64);
        num_segments += 1;  /* mapped kernel section for phys_base */
    } else if (machine_type("X86")) {
        e_machine = EM_386;
        prstatus_len = sizeof(prstatus.x86);
    } else if (machine_type("IA64")) {
        e_machine = EM_IA_64;
        prstatus_len = sizeof(prstatus.ia64);
        num_segments += 1;  /* mapped kernel section for phys_start */
    } else if (machine_type("PPC64")) {
        e_machine = EM_PPC64;
        prstatus_len = sizeof(prstatus.ppc64);
    } else if (machine_type("ARM64")) {
        e_machine = EM_AARCH64;
        prstatus_len = sizeof(prstatus.arm64);
    } else
        return NULL;

    /* should be enough for the notes + roundup + two blocks */
    buffer = (char *)GETBUF(sizeof(Elf64_Ehdr) +
        num_segments * sizeof(Elf64_Phdr) + PAGESIZE() * 2);
    offset = 0;
    ptr = buffer;

    /* Elf header */
    elf = (Elf64_Ehdr *)ptr;
    memcpy(elf->e_ident, ELFMAG, SELFMAG);
    elf->e_ident[EI_CLASS] = ELFCLASS64;
#if __BYTE_ORDER == __BIG_ENDIAN
    elf->e_ident[EI_DATA] = ELFDATA2MSB;
#else
    elf->e_ident[EI_DATA] = ELFDATA2LSB;
#endif
    elf->e_ident[EI_VERSION] = EV_CURRENT;
    elf->e_ident[EI_OSABI] = ELFOSABI_SYSV;
    elf->e_ident[EI_ABIVERSION] = 0;
    memset(elf->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
    elf->e_type = ET_CORE;
    elf->e_machine = e_machine;
    elf->e_version = EV_CURRENT;
    elf->e_entry = 0;
    elf->e_phoff = sizeof(Elf64_Ehdr);
    elf->e_shoff = 0;
    elf->e_flags = 0;
    elf->e_ehsize = sizeof(Elf64_Ehdr);
    elf->e_phentsize = sizeof(Elf64_Phdr);
    elf->e_phnum = 1 + num_segments;
    elf->e_shentsize = 0;
    elf->e_shnum = 0;
    elf->e_shstrndx = 0;
    offset += sizeof(Elf64_Ehdr);
    ptr += sizeof(Elf64_Ehdr);

    /* PT_NOTE */
    notes = (Elf64_Phdr *)ptr;
    notes->p_type = PT_NOTE;
    notes->p_offset = 0;    /* TO BE FILLED IN */
    notes->p_vaddr = 0;
    notes->p_paddr = 0;
    notes->p_filesz = 0;    /* TO BE FILLED IN */
    notes->p_memsz = 0;
    notes->p_flags = 0;
    notes->p_align = 0;
    offset += sizeof(Elf64_Phdr);
    ptr += sizeof(Elf64_Phdr);

    /* PT_LOAD */
    load = (Elf64_Phdr *)ptr;
    for (i = n = 0; i < num_segments; i++) {
        load[i].p_type = PT_LOAD;
        load[i].p_offset = 0;   /* TO BE FILLED IN */
        switch (e_machine) {
        case EM_X86_64:
            nt = &vt->node_table[n];
            if (i == 0) {
#ifdef X86_64
                load[i].p_vaddr = __START_KERNEL_map;
                load[i].p_paddr = machdep->machspec->phys_base;
#endif
                load[i].p_filesz = 0;
                load[i].p_memsz = load[i].p_filesz;
            } else {
                load[i].p_vaddr = PTOV(nt->start_paddr);
                load[i].p_paddr = nt->start_paddr;
                load[i].p_filesz = nt->size * PAGESIZE();
                load[i].p_memsz = load[i].p_filesz;
                n++;
            }
            load[i].p_flags = PF_R | PF_W | PF_X;
            load[i].p_align = 0;
            break;
        case EM_386:
            nt = &vt->node_table[n++];
            load[i].p_vaddr = 0;
            load[i].p_paddr = nt->start_paddr;
            load[i].p_filesz = nt->size * PAGESIZE();
            load[i].p_memsz = load[i].p_filesz;
            load[i].p_flags = PF_R | PF_W | PF_X;
            load[i].p_align = (type == NETDUMP_ELF64) ? PAGESIZE() : 0;
            break;
        case EM_IA_64:
            nt = &vt->node_table[n];
            if (i == 0) {
#ifdef IA64
                load[i].p_vaddr = machdep->machspec->kernel_start;
                load[i].p_paddr = machdep->machspec->phys_start;
#endif
                load[i].p_filesz = 0;
                load[i].p_memsz = load[i].p_filesz;
            } else {
                load[i].p_vaddr = PTOV(nt->start_paddr);
                load[i].p_paddr = nt->start_paddr;
                load[i].p_filesz = nt->size * PAGESIZE();
                load[i].p_memsz = load[i].p_filesz;
                n++;
            }
            load[i].p_flags = PF_R | PF_W | PF_X;
            load[i].p_align = (type == NETDUMP_ELF64) ? PAGESIZE() : 0;
            break;
        case EM_PPC64:
            nt = &vt->node_table[n++];
            load[i].p_vaddr = PTOV(nt->start_paddr);
            load[i].p_paddr = nt->start_paddr;
            load[i].p_filesz = nt->size * PAGESIZE();
            load[i].p_memsz = load[i].p_filesz;
            load[i].p_flags = PF_R | PF_W | PF_X;
            load[i].p_align = (type == NETDUMP_ELF64) ? PAGESIZE() : 0;
            break;
        case EM_AARCH64:
            nt = &vt->node_table[n++];
            load[i].p_vaddr = PTOV(nt->start_paddr);
            load[i].p_paddr = nt->start_paddr;
            load[i].p_filesz = nt->size * PAGESIZE();
            load[i].p_memsz = load[i].p_filesz;
            load[i].p_flags = PF_R | PF_W | PF_X;
            load[i].p_align = (type == NETDUMP_ELF64) ? PAGESIZE() : 0;
            break;
        }
        // l_offset += load[i].p_filesz;
        offset += sizeof(Elf64_Phdr);
        ptr += sizeof(Elf64_Phdr);
    }

    notes->p_offset = offset;

    /* NT_PRSTATUS note */
    memset(&prstatus, 0, sizeof(prstatus));
    len = dump_elf_note(ptr, NT_PRSTATUS, "CORE",
        (char *)&prstatus, prstatus_len);
    offset += len;
    ptr += len;
    notes->p_filesz += len;

    /* NT_PRPSINFO note */
    memset(&prpsinfo, 0, sizeof(struct elf_prpsinfo_64));
    prpsinfo.pr_state = 0;
    prpsinfo.pr_sname = 'R';
    prpsinfo.pr_zomb = 0;
    strcpy(prpsinfo.pr_fname, "vmlinux");
    len = dump_elf_note(ptr, NT_PRPSINFO, "CORE",
        (char *)&prpsinfo, sizeof(prpsinfo));
    offset += len;
    ptr += len;
    notes->p_filesz += len;

    /* NT_TASKSTRUCT note */
    SNAP_info.task_struct = CURRENT_TASK();
#ifdef X86_64
    SNAP_info.arch_data1 = kt->relocate;
    SNAP_info.arch_data2 = 0;
#elif ARM64
    SNAP_info.arch_data1 = machdep->machspec->kimage_voffset;
    SNAP_info.arch_data2 = (machdep->machspec->VA_BITS_ACTUAL << 32) |
        machdep->machspec->CONFIG_ARM64_VA_BITS;
#else
    SNAP_info.arch_data1 = 0;
    SNAP_info.arch_data2 = 0;
#endif
    len = dump_elf_note(ptr, NT_TASKSTRUCT, "SNAP",
        (char *)&SNAP_info, sizeof(struct SNAP_info));
    offset += len;
    ptr += len;
    notes->p_filesz += len;

    if (type == NETDUMP_ELF64)
        offset = roundup(offset, PAGESIZE());

    l_offset = offset;
    for (i = 0; i < num_segments; i++) {
        load[i].p_offset = l_offset;
        l_offset += load[i].p_filesz;
    }
    data_offset = offset;

    /* write() returns ssize_t; a size_t here would make the error check always false */
    while (offset > 0) {
        ssize_t nw = write(fd, buffer + (data_offset - offset), offset);
        if (nw < 0) {
            perror(filename);
            FREEBUF(buffer);
            return NULL;
        }
        offset -= nw;
    }

    return buffer;
}
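/*
 * generate_elf_header() relies on dump_elf_note() to append each note and
 * return how many bytes it consumed. Below is a minimal sketch of what such a
 * helper typically does -- an Elf64_Nhdr followed by the 4-byte-padded name
 * and descriptor -- not the actual implementation used by this codebase; the
 * function name and signature are assumptions for illustration only.
 */
static size_t
dump_elf_note_sketch(char *buf, Elf64_Word type, char *name, char *desc, int d_len)
{
    Elf64_Nhdr *note;
    size_t len;

    note = (Elf64_Nhdr *)buf;
    note->n_namesz = strlen(name) + 1;   /* name length includes its NUL */
    note->n_descsz = d_len;
    note->n_type = type;

    len = sizeof(Elf64_Nhdr);
    memcpy(buf + len, name, note->n_namesz);
    len += note->n_namesz;
    while (len % 4)
        buf[len++] = 0;                  /* pad name to a 4-byte boundary */
    memcpy(buf + len, desc, note->n_descsz);
    len += note->n_descsz;
    while (len % 4)
        buf[len++] = 0;                  /* pad descriptor to a 4-byte boundary */

    return len;                          /* total bytes written into buf */
}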