Example #1
void process_run(process_id_t pid)
{
  context_t user_context;

  thread_table_t *my_thread = thread_get_current_thread_entry();

  /* If my process is a zombie, that means initialisation failed. */
  if (process_table[pid].state == PROCESS_ZOMBIE) {
    if (my_thread->pagetable) {
      vm_destroy_pagetable(my_thread->pagetable);
      my_thread->pagetable = NULL;
    }
    thread_finish();
  }

  process_set_pagetable(my_thread->pagetable);
  my_thread->process_id = pid;

  /* Initialize the user context. (Status register is handled by
     thread_goto_userland) */
  memoryset(&user_context, 0, sizeof(user_context));

  _context_set_ip(&user_context, process_table[pid].entry_point);
  _context_set_sp(&user_context, process_table[pid].stack_top);

  thread_goto_userland(&user_context);
}
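
The caller of process_run() is expected to have filled in process_table[pid] (entry point, stack top, executable name) beforehand and to run the function in a fresh kernel thread. A minimal sketch of such a spawner, assuming the Buenos-style thread_create()/thread_run() interface; the function name and header paths are assumptions:

#include "kernel/thread.h"   /* thread_create, thread_run (assumed path) */
#include "proc/process.h"    /* process_id_t, process_run (assumed path) */

/* Hypothetical spawner: start a kernel thread that enters userland via
   process_run().  process_table[pid] must already be initialised. */
static void process_spawn_sketch(process_id_t pid)
{
  TID_t tid;

  tid = thread_create((void (*)(uint32_t))process_run, (uint32_t)pid);
  if (tid >= 0)
    thread_run(tid);   /* process_run() never returns in the new thread */
}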
Example #2
void physmem_init(void *boot_info)
{
  multiboot_info_t *mb_info = (multiboot_info_t*)boot_info;
  uint64_t *mem_ptr = (uint64_t*)(uint64_t)mb_info->memory_map_addr;
  uint64_t Itr = 0, last_address = 0;

  /* Set up physical memory bookkeeping */
  highest_page = 0;
  memory_size = mb_info->memory_high;
  memory_size += mb_info->memory_low;
  total_blocks = (memory_size * 1024) / PAGE_SIZE;
  used_blocks = total_blocks;
  bitmap_size = total_blocks / PMM_BLOCKS_PER_BYTE;
  _mem_bitmap = (uint64_t*)stalloc(bitmap_size);
  physmem_lock = (spinlock_t*)stalloc(sizeof(spinlock_t));
  spinlock_reset(physmem_lock);

  /* Mark all memory as used; the memory map below frees the usable regions */
  memoryset(_mem_bitmap, 0xFF, bitmap_size);

  /* Report the detected memory size */
  kprintf("Memory size: %u KB\n", (uint32_t)memory_size);

  /* Go through regions */
  for(Itr = (uint64_t)mem_ptr;
      Itr < ((uint64_t)mem_ptr + mb_info->memory_map_length);
      )
    {
      /* Get next member */
      mem_region_t *mem_region = (mem_region_t*)Itr;

      /* Output */
      //kprintf("Memory Region: Address 0x%xL, length 0x%xL, Type %u\n",
      //        mem_region->base_address, mem_region->length, mem_region->Type);

      /* Is it free? */
      if(mem_region->type == MEMTYPE_FREE)
        physmem_freeregion(mem_region->base_address,
                           mem_region->length);

      /* Advance by one structure */
      Itr += sizeof(mem_region_t);
    }

  /* Mark all memory up to the static allocation point as used */
  last_address = (physaddr_t)stalloc(1);
  stalloc_disable();

  for(Itr = physmem_allocblock(); Itr < last_address;)
    Itr = physmem_allocblock();

  /* Debug */
  kprintf("New memory allocation starts at 0x%xl\n", Itr);
}
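
physmem_freeregion() and physmem_allocblock() are not shown here; they presumably boil down to per-bit bookkeeping in _mem_bitmap, one bit per page frame with 1 = used, plus updates to used_blocks. A self-contained sketch of such helpers (the names and the byte-wise view of the bitmap are assumptions, not this project's code):

#include <stdint.h>

#define PMM_BLOCKS_PER_BYTE 8

/* Mark block `block` as used (set its bit). */
static inline void pmm_bitmap_set(uint8_t *bitmap, uint64_t block)
{
  bitmap[block / PMM_BLOCKS_PER_BYTE] |= (uint8_t)(1u << (block % PMM_BLOCKS_PER_BYTE));
}

/* Mark block `block` as free (clear its bit). */
static inline void pmm_bitmap_clear(uint8_t *bitmap, uint64_t block)
{
  bitmap[block / PMM_BLOCKS_PER_BYTE] &= (uint8_t)~(1u << (block % PMM_BLOCKS_PER_BYTE));
}

/* Return non-zero if block `block` is currently marked used. */
static inline int pmm_bitmap_test(const uint8_t *bitmap, uint64_t block)
{
  return (bitmap[block / PMM_BLOCKS_PER_BYTE] >> (block % PMM_BLOCKS_PER_BYTE)) & 1;
}

Freeing a region then means clearing the bit of every page frame it covers and decrementing used_blocks accordingly.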
Example #3
void setup_thread(thread_params_t *params)
{
    context_t user_context;
    uint32_t phys_page;
    int i;
    interrupt_status_t intr_status;
    thread_table_t *thread = thread_get_current_thread_entry();

    /* Copy thread parameters. */
    int arg = params->arg;
    void (*func)(int) = params->func;
    process_id_t pid = thread->process_id = params->pid;
    thread->pagetable = params->pagetable;
    params->done = 1; /* OK, we don't need params any more. */

    intr_status = _interrupt_disable();
    spinlock_acquire(&process_table_slock);

    /* Set up userspace environment. */
    memoryset(&user_context, 0, sizeof(user_context));

    user_context.cpu_regs[MIPS_REGISTER_A0] = arg;
    user_context.pc = (uint32_t)func;

    /* Allocate thread stack */
    if (process_table[pid].bot_free_stack != 0) {
        /* Reuse old thread stack. */
        user_context.cpu_regs[MIPS_REGISTER_SP] =
            process_table[pid].bot_free_stack
            + CONFIG_USERLAND_STACK_SIZE*PAGE_SIZE
            - 4; /* Space for the thread argument */
        process_table[pid].bot_free_stack =
            *(uint32_t*)process_table[pid].bot_free_stack;
    } else {
        /* Allocate physical pages (frames) for the stack. */
        for (i = 0; i < CONFIG_USERLAND_STACK_SIZE; i++) {
            phys_page = pagepool_get_phys_page();
            KERNEL_ASSERT(phys_page != 0);
            vm_map(thread->pagetable, phys_page,
                    process_table[pid].stack_end - (i+1)*PAGE_SIZE, 1);
        }
        user_context.cpu_regs[MIPS_REGISTER_SP] =
            process_table[pid].stack_end-4; /* Space for the thread argument */
        process_table[pid].stack_end -= PAGE_SIZE*CONFIG_USERLAND_STACK_SIZE;
    }

    tlb_fill(thread->pagetable);

    spinlock_release(&process_table_slock);
    _interrupt_set_state(intr_status);

    thread_goto_userland(&user_context);
}
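
The params->done = 1 store above is the child's half of a handshake: the spawning thread keeps the thread_params_t on its own stack and must not let it go out of scope until setup_thread() has copied the fields. A sketch of the spawning side under that assumption; thread_create(), thread_run() and thread_switch() follow the Buenos-style thread API, and the function name is hypothetical:

/* Hypothetical spawner for setup_thread().  `params` lives in this
   stack frame, so we spin until the child signals `done`. */
static void spawn_user_thread_sketch(process_id_t pid, void (*func)(int),
                                     int arg, pagetable_t *pagetable)
{
  thread_params_t params;
  TID_t tid;

  params.pid = pid;
  params.func = func;
  params.arg = arg;
  params.pagetable = pagetable;
  params.done = 0;

  tid = thread_create((void (*)(uint32_t))setup_thread, (uint32_t)&params);
  thread_run(tid);

  /* Wait until setup_thread() has copied the parameters. */
  while (params.done == 0)
    thread_switch();
}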
Example #4
/* Sets context for process */
void process_run(process_id_t pid) {
  context_t user_context;

  kprintf("vi er nu i process run\n");
  
  process_set_pagetable(thread_get_thread_entry(thread_get_current_thread())->pagetable);

  /* Initialize the user context. (Status register is handled by
     thread_goto_userland) */
  memoryset(&user_context, 0, sizeof(user_context));
  
  _context_set_ip(&user_context, process_table[pid].entry_point);
  _context_set_sp(&user_context, process_table[pid].stack_top);


  kprintf("lige før thread goto");

  thread_goto_userland(&user_context);
}
Example #5
File: elf.c Project: PtxDK/OSM
/**
 * Parse useful information from a given ELF file into the ELF info
 * structure.
 *
 * @param file The ELF file
 *
 * @param elf Information found in the file is returned in this
 * structure. In case of error this structure may contain arbitrary
 * values.
 *
 * @return A negative value on error, 0 if no loadable segment was
 * found, and a positive value on success.
 */
int elf_parse_header(elf_info_t *elf, openfile_t file)
{
  Elf32_Ehdr elf_hdr;
  Elf64_Ehdr elf_hdr64;
  Elf32_Phdr program_hdr;
  Elf64_Phdr program_hdr64;
  uint8_t use64 = 0;

  int i;
  int current_position;
  int segs = 0;
#define SEG_RO 1
#define SEG_RW 2

  /* Read the ELF header into the 64-bit sized buffer; the 32-bit view is
     extracted from it below. */
  if (vfs_read(file, &elf_hdr64, sizeof(elf_hdr64))
      != sizeof(elf_hdr64)) {
    return -1;
  }

  /* Nasty hack: the 32- and 64-bit headers share their leading fields
     (e_ident, e_type, ...), so copy the 32-bit view out of the buffer. */
  memcopy(sizeof(elf_hdr), &elf_hdr, &elf_hdr64);

  /* Check that the ELF magic is correct. */
  if (from_big_endian32(elf_hdr.e_ident.i) != ELF_MAGIC) {
    return -2;
  }

  /* Not an executable file */
  if (elf_hdr.e_type != ET_EXEC) {
    return -3;
  }

  /* Now, check architecture */
  if (elf_hdr.e_ident.c[EI_CLASS] & ELFCLASS64) {
    use64 = 1;
  }

  /* No program headers */
  if (use64) {
    if (elf_hdr64.e_phnum == 0) {
      return -4;
    }
  }
  else {
    if (elf_hdr.e_phnum == 0) {
      return -4;
    }
  }

  /* Zero the return structure. Uninitialized data is bad(TM). */
  memoryset(elf, 0, sizeof(*elf));

  /* Get the entry point */
  if (use64)
    elf->entry_point = (uint32_t)elf_hdr64.e_entry;
  else
    elf->entry_point = elf_hdr.e_entry;

  /* Seek to the program header table */
  if (use64)
    current_position = (uint32_t)elf_hdr64.e_phoff;
  else
    current_position = elf_hdr.e_phoff;

  vfs_seek(file, current_position);

  /* Read the program headers. */
  if (use64) {
    for (i = 0; i < elf_hdr64.e_phnum; i++) {
      if (vfs_read(file, &program_hdr64, sizeof(program_hdr64))
          != sizeof(program_hdr64)) {
        return -6;
      }

      switch (program_hdr64.p_type) {
      case PT_NULL:
      case PT_NOTE:
      case PT_PHDR:
        /* These program headers can be ignored */
        break;
      case PT_LOAD:
        /* These are the ones we are looking for */

        /* The RW segment */
        if (program_hdr64.p_flags & PF_W) {
          if (segs & SEG_RW) { /* already have an RW segment*/
            return -7;
          }
          segs |= SEG_RW;

          elf->rw_location = program_hdr64.p_offset;
          elf->rw_size = program_hdr64.p_filesz;
          elf->rw_vaddr = program_hdr64.p_vaddr;
          /* memory size rounded up to the page boundary, in pages */
          elf->rw_pages =
            (program_hdr64.p_memsz + PAGE_SIZE - 1) / PAGE_SIZE;

          /* The RO segment */
        } else {
          if (segs & SEG_RO) { /* already have an RO segment*/
            return -8;
          }
          segs |= SEG_RO;

          elf->ro_location = program_hdr64.p_offset;
          elf->ro_size = program_hdr64.p_filesz;
          elf->ro_vaddr = program_hdr64.p_vaddr;
          /* memory size rounded up to the page boundary, in pages */
          elf->ro_pages =
            (program_hdr64.p_memsz + PAGE_SIZE - 1) / PAGE_SIZE;
        }

        break;
        /* Other program headers indicate an incompatible file *or* a file
           with extra headers.  Just ignore. */
      }

      /* In case the program header size is non-standard: */
      current_position += sizeof(program_hdr64);
      vfs_seek(file, current_position);
    }
  }
  else {
    for (i = 0; i < elf_hdr.e_phnum; i++) {
      if (vfs_read(file, &program_hdr, sizeof(program_hdr))
          != sizeof(program_hdr)) {
        return -6;
      }

      switch (program_hdr.p_type) {
      case PT_NULL:
      case PT_NOTE:
      case PT_PHDR:
        /* These program headers can be ignored */
        break;
      case PT_LOAD:
        /* These are the ones we are looking for */

        /* The RW segment */
        if (program_hdr.p_flags & PF_W) {
          if (segs & SEG_RW) { /* already have an RW segment*/
            return -7;
          }
          segs |= SEG_RW;

          elf->rw_location = program_hdr.p_offset;
          elf->rw_size = program_hdr.p_filesz;
          elf->rw_vaddr = program_hdr.p_vaddr;
          /* memory size rounded up to the page boundary, in pages */
          elf->rw_pages =
            (program_hdr.p_memsz + PAGE_SIZE - 1) / PAGE_SIZE;

          /* The RO segment */
        } else {
          if (segs & SEG_RO) { /* already have an RO segment*/
            return -8;
          }
          segs |= SEG_RO;

          elf->ro_location = program_hdr.p_offset;
          elf->ro_size = program_hdr.p_filesz;
          elf->ro_vaddr = program_hdr.p_vaddr;
          /* memory size rounded up to the page boundary, in pages */
          elf->ro_pages =
            (program_hdr.p_memsz + PAGE_SIZE - 1) / PAGE_SIZE;
        }

        break;
        /* Other program headers indicate an incompatible file *or* a file
           with extra headers.  Just ignore. */
      }

      /* In case the program header size is non-standard: */
      current_position += sizeof(program_hdr);
      vfs_seek(file, current_position);
    }
  }

  /* Make sure either RW or RO segment is present: */
  return (segs > 0);
}
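
A caller of this variant has to read the return value carefully: negative values are error codes, 0 means no loadable segment was found, and only a positive value indicates success. A hedged usage sketch (load_image_sketch is a hypothetical name):

/* Hypothetical loader-side use of elf_parse_header(). */
static int load_image_sketch(const char *path, elf_info_t *elf)
{
  openfile_t file;

  file = vfs_open((char *)path);
  if (file < 0)
    return -1;                      /* open failed */

  if (elf_parse_header(elf, file) <= 0) {
    vfs_close(file);                /* error code or no loadable segment */
    return -1;
  }

  /* elf->entry_point, elf->ro_* and elf->rw_* can now be used to map
     and fill the process image (cf. process_start below). */
  vfs_close(file);
  return 0;
}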
Example #6
/**
 * Starts one userland process. The thread calling this function will
 * be used to run the process and will therefore never return from
 * this function. This function asserts that no errors occur in
 * process startup (the executable file exists and is a valid ELF
 * file, enough memory is available, file operations succeed...).
 * Therefore this function is not suitable for starting arbitrary
 * processes.
 *
 * @param pid The process ID of the process to start; the executable
 * name is read from the corresponding process table entry.
 */
void process_start(uint32_t pid)
{
    thread_table_t *my_entry;
    pagetable_t *pagetable;
    uint32_t phys_page;
    context_t user_context;
    uint32_t stack_bottom;
    elf_info_t elf;
    openfile_t file;
    const char* executable;

    int i;

    interrupt_status_t intr_status;

    my_entry = thread_get_current_thread_entry();
    my_entry->process_id = pid;
    executable = process_table[pid].executable;

    /* If the pagetable of this thread is not NULL, we are trying to
       run a userland process for a second time in the same thread.
       This is not possible. */
    KERNEL_ASSERT(my_entry->pagetable == NULL);

    pagetable = vm_create_pagetable(thread_get_current_thread());
    KERNEL_ASSERT(pagetable != NULL);

    intr_status = _interrupt_disable();
    my_entry->pagetable = pagetable;
    _interrupt_set_state(intr_status);

    file = vfs_open((char *)executable);
    /* Make sure the file existed and was a valid ELF file */
    KERNEL_ASSERT(file >= 0);
    KERNEL_ASSERT(elf_parse_header(&elf, file));

    /* Trivial and naive sanity check for entry point: */
    KERNEL_ASSERT(elf.entry_point >= PAGE_SIZE);

    /* Calculate the number of pages needed by the whole process
       (including userland stack). Since we don't have proper tlb
       handling code, all these pages must fit into TLB. */
    KERNEL_ASSERT(elf.ro_pages + elf.rw_pages + CONFIG_USERLAND_STACK_SIZE
		  <= _tlb_get_maxindex() + 1);

    /* Allocate and map stack */
    for(i = 0; i < CONFIG_USERLAND_STACK_SIZE; i++) {
        phys_page = pagepool_get_phys_page();
        KERNEL_ASSERT(phys_page != 0);
        vm_map(my_entry->pagetable, phys_page, 
               (USERLAND_STACK_TOP & PAGE_SIZE_MASK) - i*PAGE_SIZE, 1);
    }

    /* Allocate and map pages for the segments. We assume that
       segments begin at page boundary. (The linker script in tests
       directory creates this kind of segments) */
    for(i = 0; i < (int)elf.ro_pages; i++) {
        phys_page = pagepool_get_phys_page();
        KERNEL_ASSERT(phys_page != 0);
        vm_map(my_entry->pagetable, phys_page, 
               elf.ro_vaddr + i*PAGE_SIZE, 1);
    }

    for(i = 0; i < (int)elf.rw_pages; i++) {
        phys_page = pagepool_get_phys_page();
        KERNEL_ASSERT(phys_page != 0);
        vm_map(my_entry->pagetable, phys_page, 
               elf.rw_vaddr + i*PAGE_SIZE, 1);
    }

    /* Put the mapped pages into TLB. Here we again assume that the
       pages fit into the TLB. After writing proper TLB exception
       handling this call should be skipped. */
    //intr_status = _interrupt_disable();
    //tlb_fill(my_entry->pagetable);
    //_interrupt_set_state(intr_status);
    
    /* Now we may use the virtual addresses of the segments. */

    /* Zero the pages. */
    memoryset((void *)elf.ro_vaddr, 0, elf.ro_pages*PAGE_SIZE);
    memoryset((void *)elf.rw_vaddr, 0, elf.rw_pages*PAGE_SIZE);

    stack_bottom = (USERLAND_STACK_TOP & PAGE_SIZE_MASK) - 
        (CONFIG_USERLAND_STACK_SIZE-1)*PAGE_SIZE;
    memoryset((void *)stack_bottom, 0, CONFIG_USERLAND_STACK_SIZE*PAGE_SIZE);

    /* Copy segments */

    if (elf.ro_size > 0) {
	/* Make sure that the segment is in proper place. */
        KERNEL_ASSERT(elf.ro_vaddr >= PAGE_SIZE);
        KERNEL_ASSERT(vfs_seek(file, elf.ro_location) == VFS_OK);
        KERNEL_ASSERT(vfs_read(file, (void *)elf.ro_vaddr, elf.ro_size)
		      == (int)elf.ro_size);
    }

    if (elf.rw_size > 0) {
	/* Make sure that the segment is in proper place. */
        KERNEL_ASSERT(elf.rw_vaddr >= PAGE_SIZE);
        KERNEL_ASSERT(vfs_seek(file, elf.rw_location) == VFS_OK);
        KERNEL_ASSERT(vfs_read(file, (void *)elf.rw_vaddr, elf.rw_size)
		      == (int)elf.rw_size);
    }


    /* Set the dirty bit to zero (read-only) on read-only pages. */
    for(i = 0; i < (int)elf.ro_pages; i++) {
        vm_set_dirty(my_entry->pagetable, elf.ro_vaddr + i*PAGE_SIZE, 0);
    }

    /* Insert page mappings again to TLB to take read-only bits into use */
    //intr_status = _interrupt_disable();
    //tlb_fill(my_entry->pagetable);
    //_interrupt_set_state(intr_status);

    /* Initialize the user context. (Status register is handled by
       thread_goto_userland) */
    memoryset(&user_context, 0, sizeof(user_context));
    user_context.cpu_regs[MIPS_REGISTER_SP] = USERLAND_STACK_TOP;
    user_context.pc = elf.entry_point;

    vfs_close(file);

    thread_goto_userland(&user_context);

    KERNEL_PANIC("thread_goto_userland failed.");
}
Example #7
/* Return non-zero on error. */
int setup_new_process(TID_t thread,
                      const char *executable, const char **argv_src,
                      virtaddr_t *entry_point, virtaddr_t *stack_top)
{
  pagetable_t *pagetable;
  elf_info_t elf;
  openfile_t file;
  uintptr_t phys_page;
  int i, res;
  thread_table_t *thread_entry = thread_get_thread_entry(thread);

  int argc = 1;
  virtaddr_t argv_begin;
  virtaddr_t argv_dst;
  int argv_elem_size;
  virtaddr_t argv_elem_dst;

  file = vfs_open((char *)executable);

  /* Make sure the file existed and was a valid ELF file */
  if (file < 0) {
    return -1;
  }

  res = elf_parse_header(&elf, file);
  if (res < 0) {
    return -1;
  }

  /* Trivial and naive sanity check for entry point: */
  if (elf.entry_point < PAGE_SIZE) {
    return -1;
  }

  *entry_point = elf.entry_point;

  pagetable = vm_create_pagetable(thread);

  thread_entry->pagetable = pagetable;

  /* Allocate and map stack */
  for(i = 0; i < CONFIG_USERLAND_STACK_SIZE; i++) {
    phys_page = physmem_allocblock();
    KERNEL_ASSERT(phys_page != 0);
    /* Zero the page */
    memoryset((void*)ADDR_PHYS_TO_KERNEL(phys_page), 0, PAGE_SIZE);
    vm_map(pagetable, phys_page,
           (USERLAND_STACK_TOP & PAGE_SIZE_MASK) - i*PAGE_SIZE, 1);
  }

  /* Allocate and map pages for the segments. We assume that
     segments begin at page boundary. (The linker script in tests
     directory creates this kind of segments) */
  for(i = 0; i < (int)elf.ro_pages; i++) {
    int left_to_read = elf.ro_size - i*PAGE_SIZE;
    phys_page = physmem_allocblock();
    KERNEL_ASSERT(phys_page != 0);
    /* Zero the page */
    memoryset((void*)ADDR_PHYS_TO_KERNEL(phys_page), 0, PAGE_SIZE);
    /* Fill the page from ro segment */
    if (left_to_read > 0) {
      KERNEL_ASSERT(vfs_seek(file, elf.ro_location + i*PAGE_SIZE) == VFS_OK);
      KERNEL_ASSERT(vfs_read(file, (void*)ADDR_PHYS_TO_KERNEL(phys_page),
                             MIN(PAGE_SIZE, left_to_read))
                    == (int) MIN(PAGE_SIZE, left_to_read));
    }
    vm_map(pagetable, phys_page,
           elf.ro_vaddr + i*PAGE_SIZE, 0);
  }

  for(i = 0; i < (int)elf.rw_pages; i++) {
    int left_to_read = elf.rw_size - i*PAGE_SIZE;
    phys_page = physmem_allocblock();
    KERNEL_ASSERT(phys_page != 0);
    /* Zero the page */
    memoryset((void*)ADDR_PHYS_TO_KERNEL(phys_page), 0, PAGE_SIZE);
    /* Fill the page from rw segment */
    if (left_to_read > 0) {
      KERNEL_ASSERT(vfs_seek(file, elf.rw_location + i*PAGE_SIZE) == VFS_OK);
      KERNEL_ASSERT(vfs_read(file, (void*)ADDR_PHYS_TO_KERNEL(phys_page),
                             MIN(PAGE_SIZE, left_to_read))
                    == (int) MIN(PAGE_SIZE, left_to_read));
    }
    vm_map(pagetable, phys_page,
           elf.rw_vaddr + i*PAGE_SIZE, 1);
  }

  /* Set up argc and argv on the stack. */

  /* Start by preparing ancillary information for the new process argv. */
  if (argv_src != NULL)
    for (i = 0; argv_src[i] != NULL; i++) {
      argc++;
    }

  argv_begin = USERLAND_STACK_TOP - (argc * sizeof(virtaddr_t));
  argv_dst = argv_begin;

  /* Prepare for copying executable. */
  argv_elem_size = strlen(executable) + 1;
  argv_elem_dst = argv_dst - wordpad(argv_elem_size);

  /* Copy executable to argv[0] location. */
  vm_memwrite(pagetable,
              argv_elem_size,
              argv_elem_dst,
              executable);
  /* Set argv[i] */
  vm_memwrite(pagetable,
              sizeof(virtaddr_t),
              argv_dst,
              &argv_elem_dst);

  /* Move argv_dst to &argv[1]. */
  argv_dst += sizeof(virtaddr_t);

  if (argv_src != NULL) {
    for (i = 0; argv_src[i] != NULL; i++) {
      /* Compute the size of argv[i+1] */
      argv_elem_size = strlen(argv_src[i]) + 1;
      argv_elem_dst -= wordpad(argv_elem_size);

      /* Write the 'i+1'th element of argv */
      vm_memwrite(pagetable,
                  argv_elem_size,
                  argv_elem_dst,
                  argv_src[i]);

      /* Write argv[i+1] */
      vm_memwrite(pagetable,
                  sizeof(virtaddr_t),
                  argv_dst,
                  &argv_elem_dst);

      /* Move argv_dst to next element of argv. */
      argv_dst += sizeof(virtaddr_t);
    }
  }

  /* Write argc to the stack. */
  vm_memwrite(pagetable,
              sizeof(int),
              argv_elem_dst - sizeof(int),
              &argc);
  /* Write argv to the stack. */
  vm_memwrite(pagetable,
              sizeof(virtaddr_t),
              argv_elem_dst - sizeof(int) - sizeof(virtaddr_t),
              &argv_begin);

  /* Stack pointer points at argv. */
  *stack_top = argv_elem_dst - sizeof(int) - sizeof(virtaddr_t);

  return 0;
}
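
wordpad() above keeps each copied argv string word-aligned on the userland stack; its definition is not shown. A plausible stand-in (an assumption, not this project's code) rounds a byte count up to the next multiple of the 32-bit word size:

#include <stdint.h>

/* Round `size` bytes up to the next word boundary, so the argv
   pointers written next to the strings stay aligned. */
static inline int wordpad(int size)
{
  return (size + (int)sizeof(uint32_t) - 1) & ~((int)sizeof(uint32_t) - 1);
}

With that, the resulting layout from *stack_top upward is: the argv pointer, argc, the padded argument strings, and finally the argv[] pointer array ending at USERLAND_STACK_TOP.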
Example #8
File: tfs.c Project: ArvoX/osm
/**
 * Creates a file of the given size. Implements fs.create(). Checks that
 * the file name doesn't already exist in the directory block. Allocates
 * enough blocks from the allocation block for the file (1 for the inode
 * and then enough for the file of the given size). Reserved blocks are zeroed.
 *
 * @param fs Pointer to fs data structure of the device.
 * @param filename File name of the file to be created
 * @param size Size of the file to be created
 *
 * @return If the file already exists or there is not enough space, return VFS_ERROR;
 * otherwise return VFS_OK.
 */
int tfs_create(fs_t *fs, char *filename, int size) 
{
    tfs_t *tfs = (tfs_t *)fs->internal;
    gbd_request_t req;
    uint32_t i;
    uint32_t numblocks = (size + TFS_BLOCK_SIZE - 1)/TFS_BLOCK_SIZE; 
    int index = -1;
    int r;
	
    semaphore_P(tfs->lock);
	
    if(numblocks > (TFS_BLOCK_SIZE / 4 - 1)) {
        semaphore_V(tfs->lock);
        return VFS_ERROR;
    }
    
    /* Read the directory block. Check that the file doesn't already exist and
       that there is space left for the file in the directory block. */
    req.block = TFS_DIRECTORY_BLOCK;
    req.buf = ADDR_KERNEL_TO_PHYS((uint32_t)tfs->buffer_md);
    req.sem = NULL;
    r = tfs->disk->read_block(tfs->disk, &req);
    if(r == 0) {
        /* An error occurred. */
        semaphore_V(tfs->lock);
        return VFS_ERROR;
    }
	
    for(i=0;i<TFS_MAX_FILES;i++) {
        if(stringcmp(tfs->buffer_md[i].name, filename) == 0) {
            semaphore_V(tfs->lock);
            return VFS_ERROR;
        }

        if(tfs->buffer_md[i].inode == 0) {
            /* found free slot from directory */
            index = i;
        }
    }
	
    if(index == -1) {
        /* no free slot was found in the directory (index was never set) */
        semaphore_V(tfs->lock);
        return VFS_ERROR;
    }
	
    stringcopy(tfs->buffer_md[index].name,filename, TFS_FILENAME_MAX);
	
    /* Read allocation block and... */
    req.block = TFS_ALLOCATION_BLOCK;
    req.buf = ADDR_KERNEL_TO_PHYS((uint32_t)tfs->buffer_bat);
    req.sem = NULL;
    r = tfs->disk->read_block(tfs->disk, &req);
    if(r==0) {
        /* An error occurred. */
        semaphore_V(tfs->lock);
        return VFS_ERROR;
    }
	
	
    /* ...find space for inode... */
    tfs->buffer_md[index].inode = bitmap_findnset(tfs->buffer_bat,
                                                  tfs->totalblocks);
    if((int)tfs->buffer_md[index].inode == -1) {
        semaphore_V(tfs->lock);
        return VFS_ERROR;
    }
	
    /* ...and the rest of the blocks. Mark the found block numbers in
       the inode. */
    tfs->buffer_inode->filesize = size;
    for(i=0; i<numblocks; i++) {
        tfs->buffer_inode->block[i] = bitmap_findnset(tfs->buffer_bat,
                                                      tfs->totalblocks);
        if((int)tfs->buffer_inode->block[i] == -1) {
            /* Disk full. No free block found. */
            semaphore_V(tfs->lock);
            return VFS_ERROR;
        }
    }
    
    /* Mark rest of the blocks in inode as unused. */
    while(i < (TFS_BLOCK_SIZE / 4 - 1))
        tfs->buffer_inode->block[i++] = 0;

    req.block = TFS_ALLOCATION_BLOCK;
    req.buf   = ADDR_KERNEL_TO_PHYS((uint32_t)tfs->buffer_bat);
    req.sem   = NULL;
    r = tfs->disk->write_block(tfs->disk, &req);
    if(r==0) {
        /* An error occurred. */
        semaphore_V(tfs->lock);
        return VFS_ERROR;
    }
	
    req.block = TFS_DIRECTORY_BLOCK;
    req.buf   = ADDR_KERNEL_TO_PHYS((uint32_t)tfs->buffer_md);
    req.sem   = NULL;
    r = tfs->disk->write_block(tfs->disk, &req);
    if(r==0) {
        /* An error occurred. */
        semaphore_V(tfs->lock);
        return VFS_ERROR;
    }
	
    req.block = tfs->buffer_md[index].inode;
    req.buf   = ADDR_KERNEL_TO_PHYS((uint32_t)tfs->buffer_inode);
    req.sem   = NULL;
    r = tfs->disk->write_block(tfs->disk, &req);
    if(r==0) {
        /* An error occurred. */
        semaphore_V(tfs->lock);
        return VFS_ERROR;
    }
	
    /* Write zeros to the reserved blocks. The buffer for the allocation block
       is no longer needed, so let's use it as a zero buffer. */
    memoryset(tfs->buffer_bat, 0, TFS_BLOCK_SIZE);
    for(i=0;i<numblocks;i++) {
        req.block = tfs->buffer_inode->block[i];
        req.buf   = ADDR_KERNEL_TO_PHYS((uint32_t)tfs->buffer_bat);
        req.sem   = NULL;
        r = tfs->disk->write_block(tfs->disk, &req);
        if(r==0) {
            /* An error occurred. */
            semaphore_V(tfs->lock);
            return VFS_ERROR;
        }
       
    }
	
    semaphore_V(tfs->lock);
    return VFS_OK;
}
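
The size check near the top follows from the inode layout: the inode block stores the 32-bit file size plus TFS_BLOCK_SIZE/4 - 1 direct block pointers, so with the usual 512-byte TFS blocks (an assumption here) a file can span at most 127 data blocks. A standalone arithmetic check:

#include <stdio.h>

#define TFS_BLOCK_SIZE 512   /* assumed block size */

int main(void)
{
  int size = 1300;  /* example file size in bytes */
  unsigned numblocks = (size + TFS_BLOCK_SIZE - 1) / TFS_BLOCK_SIZE; /* ceil -> 3 */
  unsigned max_blocks = TFS_BLOCK_SIZE / 4 - 1;                      /* 127 */

  printf("numblocks=%u max=%u fits=%s\n",
         numblocks, max_blocks, numblocks <= max_blocks ? "yes" : "no");
  return 0;
}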
Example #9
/**
 * Parse useful information from a given ELF file into the ELF info
 * structure.
 *
 * @param file The ELF file
 *
 * @param elf Information found in the file is returned in this
 * structure. In case of error this structure may contain arbitrary
 * values.
 *
 * @return 0 on failure, other values indicate success.
 */
int elf_parse_header(elf_info_t *elf, openfile_t file)
{
    Elf32_Ehdr elf_hdr;
    Elf32_Phdr program_hdr;

    int i;
    int current_position;
    int segs = 0;
#define SEG_RO 1
#define SEG_RW 2

    /* Read the ELF header */
    if (vfs_read(file, &elf_hdr, sizeof(elf_hdr))
	!= sizeof(elf_hdr)) {
        return 0;
    }

    /* Check that the ELF magic is correct. */
    if (EI_MAGIC(elf_hdr.e_ident) != ELF_MAGIC) {
        return 0;
    }

    /* File data is not MIPS 32 bit big-endian */
    if (elf_hdr.e_ident[EI_CLASS] != ELFCLASS32
	|| elf_hdr.e_ident[EI_DATA] != ELFDATA2MSB
	|| elf_hdr.e_machine != EM_MIPS) {
	return 0;
    }

    /* Invalid ELF version */
    if (elf_hdr.e_version != EV_CURRENT 
	|| elf_hdr.e_ident[EI_VERSION] != EV_CURRENT) {
	return 0;
    }

    /* Not an executable file */
    if (elf_hdr.e_type != ET_EXEC) {
	return 0;
    }

    /* No program headers */
    if (elf_hdr.e_phnum == 0) {
	return 0;
    }

    /* Zero the return structure. Uninitialized data is bad(TM). */
    memoryset(elf, 0, sizeof(*elf));

    /* Get the entry point */
    elf->entry_point = elf_hdr.e_entry;

    /* Seek to the program header table */
    current_position = elf_hdr.e_phoff;
    vfs_seek(file, current_position);

    /* Read the program headers. */
    for (i = 0; i < elf_hdr.e_phnum; i++) {
	if (vfs_read(file, &program_hdr, sizeof(program_hdr))
	    != sizeof(program_hdr)) {
	    return 0;
	}

	switch (program_hdr.p_type) {
	case PT_NULL:
	case PT_NOTE:
	case PT_PHDR:
	    /* These program headers can be ignored */
	    break;
	case PT_LOAD:
	    /* These are the ones we are looking for */

	    /* The RW segment */
	    if (program_hdr.p_flags & PF_W) {
		if (segs & SEG_RW) { /* already have an RW segment*/
		    return 0;
		}
		segs |= SEG_RW;

		elf->rw_location = program_hdr.p_offset;
		elf->rw_size = program_hdr.p_filesz;
		elf->rw_vaddr = program_hdr.p_vaddr;
		/* memory size rounded up to the page boundary, in pages */
		elf->rw_pages = 
		    (program_hdr.p_memsz + PAGE_SIZE - 1) / PAGE_SIZE;

	    /* The RO segment */
	    } else {
		if (segs & SEG_RO) { /* already have an RO segment*/
		    return 0;
		}
		segs |= SEG_RO; 

		elf->ro_location = program_hdr.p_offset;
		elf->ro_size = program_hdr.p_filesz;
		elf->ro_vaddr = program_hdr.p_vaddr;
		/* memory size rounded up to the page boundary, in pages */
		elf->ro_pages = 
		    (program_hdr.p_memsz + PAGE_SIZE - 1) / PAGE_SIZE;
	    }

	    break;
	default:
	    /* Other program headers indicate an incompatible file */
	    return 0;
	}

	/* In case the program header size is non-standard: */
	current_position += sizeof(program_hdr);
	vfs_seek(file, current_position);
    }

    /* Make sure either RW or RO segment is present: */
    return (segs > 0);
}