Example #1
File: vfs.c Project: PtxDK/OSM
int vfs_seek(openfile_t file, int seek_position)
{
  openfile_entry_t *openfile;

  if (vfs_start_op() != VFS_OK)
    return VFS_UNUSABLE;

  KERNEL_ASSERT(seek_position >= 0);
  semaphore_P(openfile_table.sem);

  openfile = vfs_verify_open(file);
  openfile->seek_position = seek_position;

  semaphore_V(openfile_table.sem);

  vfs_end_op();
  return VFS_OK;
}
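Callers are expected to check the VFS return codes. A minimal sketch following the seek-then-read pattern used in Examples #12 and #16 (the helper name read_at is illustrative, not from the project):

/* Sketch: reposition an open file, then read from it, checking the
   return codes as the callers in Examples #12 and #16 do. */
int read_at(openfile_t file, int offset, void *buf, int size)
{
    if (vfs_seek(file, offset) != VFS_OK)
        return VFS_ERROR;

    return vfs_read(file, buf, size);
}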
Example #2
/**
 * Sets or clears the given bit in the bitmap.
 *
 * @param bitmap The bitmap
 * @param pos The index of the bit to set
 * @param value The new value of the given bit. Valid values are 0 and 1.
 */
void bitmap_set(bitmap_t *bitmap, int pos, int value)
{
  int i;
  int j;

  KERNEL_ASSERT(pos >= 0);

  i = pos / 32;
  j = pos % 32;

  if (value == 0) {
    bitmap[i] = bitmap[i] & ~(1 << j);
  } else if (value == 1) {
    bitmap[i] = bitmap[i] | (1 << j);
  } else {
    KERNEL_PANIC("bit value other than 0 or 1");
  }
}
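The read-side counterpart is not shown on this page; a sketch consistent with the word/bit split above (the name bitmap_get is an assumption):

/* Sketch: read one bit back, using the same 32-bit word indexing
   as bitmap_set above. */
int bitmap_get(bitmap_t *bitmap, int pos)
{
  int i;
  int j;

  KERNEL_ASSERT(pos >= 0);

  i = pos / 32;   /* which 32-bit word */
  j = pos % 32;   /* which bit inside that word */

  return (bitmap[i] >> j) & 1;
}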
Example #3
File: vfs.c Project: Rotte/osm-k
int vfs_mount(fs_t *fs, char *name)
{
    int i;
    int row;

    KERNEL_ASSERT(name != NULL && name[0] != '\0');

    if (vfs_start_op() != VFS_OK)
        return VFS_UNUSABLE;

    semaphore_P(vfs_table.sem);

    for (i = 0; i < CONFIG_MAX_FILESYSTEMS; i++) {
        if (vfs_table.filesystems[i].filesystem == NULL)
            break;
    }

    row = i;

    if(row >= CONFIG_MAX_FILESYSTEMS) {
        semaphore_V(vfs_table.sem);
        kprintf("VFS: Warning, maximum mount count exceeded, mount failed.\n");
        vfs_end_op();
        return VFS_LIMIT;
    }

    for (i = 0; i < CONFIG_MAX_FILESYSTEMS; i++) {
        if(stringcmp(vfs_table.filesystems[i].mountpoint, name) == 0) {
            semaphore_V(vfs_table.sem);
            kprintf("VFS: Warning, attempt to mount 2 filesystems "
                    "with same name\n");
            vfs_end_op();
            return VFS_ERROR;
        }
    }

    stringcopy(vfs_table.filesystems[row].mountpoint, name, VFS_NAME_LENGTH);
    vfs_table.filesystems[row].filesystem = fs;

    semaphore_V(vfs_table.sem);
    vfs_end_op();
    return VFS_OK;
}
Example #4
void ker::module::task::run()
{
	KERNEL_ASSERT( !complete_ );

	// Run this task's function once; mark it complete only on success.
	if( func_ )
	{
		try
		{
			func_();

			complete_ = true;
		}
		catch( std::exception& e )
		{
			KERNEL_LOG( "%s\n", e.what() );
		}
	}
}
Example #5
void cpustatus_generate_irq(device_t *dev)
{
  interrupt_status_t intr_status;
  volatile cpu_io_area_t *iobase;
  cpu_real_device_t *cpu;

  /* Check the device pointer before dereferencing it. */
  KERNEL_ASSERT(dev != NULL);

  iobase = (cpu_io_area_t *)dev->io_address;
  cpu = (cpu_real_device_t *)dev->real_device;
  KERNEL_ASSERT(cpu != NULL);

  intr_status = _interrupt_disable();
  spinlock_acquire(&cpu->slock);

  /* If you really want to do something with inter-cpu interrupts,
     do it here.*/

  /* Generate the IRQ */
  iobase->command = CPU_COMMAND_RAISE_IRQ;

  spinlock_release(&cpu->slock);
  _interrupt_set_state(intr_status);
}
Example #6
/** Perform suicide. The calling thread will kill itself by freeing
 * its memory and other resources and marking itself as dying. The
 * scheduler will free the thread table entry when it encounters dying
 * threads.
 */
void thread_finish(void)
{
    TID_t my_tid;

    my_tid = thread_get_current_thread();

    _interrupt_disable();

    /* Check that the page mappings have been cleared. */
    KERNEL_ASSERT(thread_table[my_tid].pagetable == NULL);

    spinlock_acquire(&thread_table_slock);
    thread_table[my_tid].state = THREAD_DYING;
    spinlock_release(&thread_table_slock);

    _interrupt_enable();
    _interrupt_generate_sw0();

    /* not possible without a stack? alternative in assembler? */
    KERNEL_PANIC("thread_finish(): thread was not destroyed");
}
Example #7
/** Initializes the threading system. Does this by setting all thread
 *  table entry states to THREAD_FREE. Called only once before any
 *  threads are created.
 */
void thread_table_init(void)
{
  int i;

  /* Thread table entry _must_ be 64 bytes long, because the
     context switching code in kernel/cswitch.S expects that. Let's
     make sure it is. If you hit this error, you have changed either
     context_t or thread_table_t, but have not changed padding in
     the end of thread_table_t definition in kernel/thread.h */
  KERNEL_ASSERT(sizeof(thread_table_t) == 64);

  spinlock_reset(&thread_table_slock);

  /* Init all entries to 'NULL' */
  for (i=0; i<CONFIG_MAX_THREADS; i++) {
    /* Set context pointers to the top of the stack*/
    thread_table[i].context      = (context_t *) (thread_stack_areas
                                                  + CONFIG_THREAD_STACKSIZE*i
                                                  + CONFIG_THREAD_STACKSIZE
                                                  - sizeof(context_t));
    thread_table[i].user_context = NULL;
    thread_table[i].state        = THREAD_FREE;
    thread_table[i].sleeps_on    = 0;
    thread_table[i].pagetable    = NULL;
    thread_table[i].attribs      = 0;
    thread_table[i].process_id   = -1;
    thread_table[i].next         = -1;
  }

  /* Setup Idle Thread */
  _context_set_ip(thread_table[IDLE_THREAD_TID].context,
                  (virtaddr_t)_idle_thread_wait_loop);
  _context_set_sp(thread_table[IDLE_THREAD_TID].context,
                  (virtaddr_t) thread_stack_areas + CONFIG_THREAD_STACKSIZE - 4 -
                  sizeof(context_t));
  _context_enable_ints(thread_table[IDLE_THREAD_TID].context);

  thread_table[IDLE_THREAD_TID].state = THREAD_READY;
  thread_table[IDLE_THREAD_TID].context->prev_context =
    thread_table[IDLE_THREAD_TID].context;
}
Example #8
void ker::module::tasks::run()
{
	while( task_flag_ )
	{
		int count = 0;

		for( uint8_t i = 0; i < 32; i++ )
		{
			uint32_t mask = 1 << i;
			if( task_flag_ & mask )
			{
				KERNEL_LOG( "%i %i\n", task_flag_, mask );	

				std::shared_ptr<ker::module::task> task = task_map_[mask];

				KERNEL_ASSERT( task );

				try 
				{
					task->run();

					task_flag_ &= ~mask;

					count++;
				}
				catch( std::exception& e )
				{
					KERNEL_LOG( "%s\n", e.what() );
				}
			}
		}

		if( count == 0 )
		{
			KERNEL_LOG( "no in tasks progress\n" );
			exit(0);
		}
	}
}
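The loop above drains a 32-bit pending mask, clearing each bit once its task completes. A minimal C sketch of the same dispatch pattern (all names are illustrative, not from the project):

#include <stdint.h>

typedef void (*task_fn_t)(void);

/* One handler per bit of the pending mask. */
static task_fn_t handlers[32];
static volatile uint32_t pending;

static void drain_pending(void)
{
    uint8_t i;

    for (i = 0; i < 32; i++) {
        uint32_t mask = (uint32_t)1 << i;
        if ((pending & mask) && handlers[i] != 0) {
            handlers[i]();     /* run the task for this bit */
            pending &= ~mask;  /* mark it done */
        }
    }
}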
Example #9
/**
 * Finds first free physical page and marks it reserved.
 *
 * @return Address of first free physical page, zero if no free pages
 * are available.
 */
physaddr_t physmem_allocblock(void)
{
  interrupt_status_t intr_status;
  int i;

  intr_status = _interrupt_disable();
  spinlock_acquire(&physmem_slock);

  if (physmem_num_free_pages > 0) {
    i = bitmap_findnset(physmem_free_pages,physmem_num_pages);
    physmem_num_free_pages--;

    /* There should have been a free page. Check that the physmem
       internal variables are in synch. */
    KERNEL_ASSERT(i >= 0 && physmem_num_free_pages >= 0);
  } else {
    i = 0;
  }

  spinlock_release(&physmem_slock);
  _interrupt_set_state(intr_status);
  return i*PAGE_SIZE;
}
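A freeing counterpart would clear the corresponding bitmap bit under the same lock. A sketch, assuming bitmap_set from Example #2 backs the physmem_free_pages bitmap (the function name physmem_freeblock and the exact bookkeeping are assumptions):

/* Sketch: return one page to the pool by clearing its bitmap bit,
   mirroring the locking in physmem_allocblock above. */
void physmem_freeblock(physaddr_t addr)
{
  interrupt_status_t intr_status;
  int i = addr / PAGE_SIZE;

  KERNEL_ASSERT(addr % PAGE_SIZE == 0);

  intr_status = _interrupt_disable();
  spinlock_acquire(&physmem_slock);

  bitmap_set(physmem_free_pages, i, 0);  /* 0 == page is free */
  physmem_num_free_pages++;

  spinlock_release(&physmem_slock);
  _interrupt_set_state(intr_status);
}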
Example #10
File: vfs.c Project: Rotte/osm-k
/**
 * Force unmount on all filesystems. This function should be used only
 * when halting the system. Waits for all VFS operations to complete
 * but does not wait for all files to be closed. After this function
 * is called the VFS and the whole operating system can no longer be
 * used.
 */
void vfs_deinit(void)
{
    fs_t *fs;
    int row;

    semaphore_P(vfs_op_sem);
    vfs_usable = 0;

    kprintf("VFS: Entering forceful unmount of all filesystems.\n");
    if (vfs_ops > 0) {
        kprintf("VFS: Delaying force unmount until the pending %d "
                "operations are done.\n", vfs_ops);
        semaphore_V(vfs_op_sem);
        semaphore_P(vfs_unmount_sem);
        semaphore_P(vfs_op_sem);
        KERNEL_ASSERT(vfs_ops == 0);
        kprintf("VFS: Continuing forceful unmount.\n");
    }

    semaphore_P(vfs_table.sem);
    semaphore_P(openfile_table.sem);

    for (row = 0; row < CONFIG_MAX_FILESYSTEMS; row++) {
        fs = vfs_table.filesystems[row].filesystem;
        if (fs != NULL) {
            kprintf("VFS: Forcefully unmounting volume [%s]\n",
                    vfs_table.filesystems[row].mountpoint);
            fs->unmount(fs);
            vfs_table.filesystems[row].filesystem = NULL;
        }
    }

    semaphore_V(openfile_table.sem);
    semaphore_V(vfs_table.sem);
    semaphore_V(vfs_op_sem);
}
Example #11
File: main.c Project: cfrost/buenos
void init(void)
{
    TID_t startup_thread;
    int numcpus;

    /* Initialize polling TTY driver for kprintf() usage. */
    polltty_init();

    kwrite("BUENOS is a University Educational Nutshell Operating System\n");
    kwrite("==========================================================\n");
    kwrite("\n");

    kwrite("Copyright (C) 2003-2006  Juha Aatrokoski, Timo Lilja,\n");
    kwrite("  Leena Salmela, Teemu Takanen, Aleksi Virtanen\n");
    kwrite("See the file COPYING for licensing details.\n");
    kwrite("\n");

    kwrite("Initializing memory allocation system\n");
    kmalloc_init();

    kwrite("Reading boot arguments\n");
    bootargs_init();

    /* Seed the random number generator. */
    if (bootargs_get("randomseed") == NULL) {
        _set_rand_seed(0);
    } else {
        int seed = atoi(bootargs_get("randomseed"));
        kprintf("Seeding pseudorandom number generator with %i\n", seed);
        _set_rand_seed(seed);
    }

    numcpus = cpustatus_count();
    kprintf("Detected %i CPUs\n", numcpus);
    KERNEL_ASSERT(numcpus <= CONFIG_MAX_CPUS);

    kwrite("Initializing interrupt handling\n");
    interrupt_init(numcpus);

    kwrite("Initializing threading system\n");
    thread_table_init();

    kwrite("Initializing user process system\n");
    process_init();

    kwrite("Initializing sleep queue\n");
    sleepq_init();

    kwrite("Initializing semaphores\n");
    semaphore_init();

    kwrite("Initializing device drivers\n");
    device_init();

    kprintf("Initializing virtual filesystem\n");
    vfs_init();

    kwrite("Initializing scheduler\n");
    scheduler_init();

    kwrite("Initializing virtual memory\n");
    vm_init();

    kprintf("Creating initialization thread\n");
    startup_thread = thread_create(&init_startup_thread, 0);
    thread_run(startup_thread);

    kprintf("Starting threading system and SMP\n");

    /* Let other CPUs run */
    kernel_bootstrap_finished = 1;
    
    _interrupt_clear_bootstrap();
    _interrupt_enable();

    /* Enter context switch, scheduler will be run automatically,
       since thread_switch() behaviour is identical to timer tick
       (thread timeslice is over). */
    thread_switch();

    /* We should never get here */
    KERNEL_PANIC("Threading system startup failed.");
}
Example #12
/**
 * Starts one userland process. The thread calling this function will
 * be used to run the process and will therefore never return from
 * this function. This function asserts that no errors occur in
 * process startup (the executable file exists and is a valid ELF
 * file, enough memory is available, file operations succeed...).
 * Therefore this function is not suitable for starting arbitrary
 * processes.
 *
 * @param pid The process table entry of the process to run; the
 * executable name is read from that entry.
 */
void process_start(uint32_t pid)
{
    thread_table_t *my_entry;
    pagetable_t *pagetable;
    uint32_t phys_page;
    context_t user_context;
    uint32_t stack_bottom;
    elf_info_t elf;
    openfile_t file;
    const char* executable;

    int i;

    interrupt_status_t intr_status;

    my_entry = thread_get_current_thread_entry();
    my_entry->process_id = pid;
    executable = process_table[pid].executable;

    /* If the pagetable of this thread is not NULL, we are trying to
       run a userland process for a second time in the same thread.
       This is not possible. */
    KERNEL_ASSERT(my_entry->pagetable == NULL);

    pagetable = vm_create_pagetable(thread_get_current_thread());
    KERNEL_ASSERT(pagetable != NULL);

    intr_status = _interrupt_disable();
    my_entry->pagetable = pagetable;
    _interrupt_set_state(intr_status);

    file = vfs_open((char *)executable);
    /* Make sure the file existed and was a valid ELF file */
    KERNEL_ASSERT(file >= 0);
    KERNEL_ASSERT(elf_parse_header(&elf, file));

    /* Trivial and naive sanity check for entry point: */
    KERNEL_ASSERT(elf.entry_point >= PAGE_SIZE);

    /* Calculate the number of pages needed by the whole process
       (including userland stack). Since we don't have proper tlb
       handling code, all these pages must fit into TLB. */
    KERNEL_ASSERT(elf.ro_pages + elf.rw_pages + CONFIG_USERLAND_STACK_SIZE
		  <= _tlb_get_maxindex() + 1);

    /* Allocate and map stack */
    for(i = 0; i < CONFIG_USERLAND_STACK_SIZE; i++) {
        phys_page = pagepool_get_phys_page();
        KERNEL_ASSERT(phys_page != 0);
        vm_map(my_entry->pagetable, phys_page, 
               (USERLAND_STACK_TOP & PAGE_SIZE_MASK) - i*PAGE_SIZE, 1);
    }

    /* Allocate and map pages for the segments. We assume that
       segments begin at page boundary. (The linker script in tests
       directory creates this kind of segments) */
    for(i = 0; i < (int)elf.ro_pages; i++) {
        phys_page = pagepool_get_phys_page();
        KERNEL_ASSERT(phys_page != 0);
        vm_map(my_entry->pagetable, phys_page, 
               elf.ro_vaddr + i*PAGE_SIZE, 1);
    }

    for(i = 0; i < (int)elf.rw_pages; i++) {
        phys_page = pagepool_get_phys_page();
        KERNEL_ASSERT(phys_page != 0);
        vm_map(my_entry->pagetable, phys_page, 
               elf.rw_vaddr + i*PAGE_SIZE, 1);
    }

    /* Put the mapped pages into TLB. Here we again assume that the
       pages fit into the TLB. After writing proper TLB exception
       handling this call should be skipped. */
    //intr_status = _interrupt_disable();
    //tlb_fill(my_entry->pagetable);
    //_interrupt_set_state(intr_status);
    
    /* Now we may use the virtual addresses of the segments. */

    /* Zero the pages. */
    memoryset((void *)elf.ro_vaddr, 0, elf.ro_pages*PAGE_SIZE);
    memoryset((void *)elf.rw_vaddr, 0, elf.rw_pages*PAGE_SIZE);

    stack_bottom = (USERLAND_STACK_TOP & PAGE_SIZE_MASK) - 
        (CONFIG_USERLAND_STACK_SIZE-1)*PAGE_SIZE;
    memoryset((void *)stack_bottom, 0, CONFIG_USERLAND_STACK_SIZE*PAGE_SIZE);

    /* Copy segments */

    if (elf.ro_size > 0) {
        /* Make sure that the segment is in the proper place. */
        KERNEL_ASSERT(elf.ro_vaddr >= PAGE_SIZE);
        KERNEL_ASSERT(vfs_seek(file, elf.ro_location) == VFS_OK);
        KERNEL_ASSERT(vfs_read(file, (void *)elf.ro_vaddr, elf.ro_size)
		      == (int)elf.ro_size);
    }

    if (elf.rw_size > 0) {
        /* Make sure that the segment is in the proper place. */
        KERNEL_ASSERT(elf.rw_vaddr >= PAGE_SIZE);
        KERNEL_ASSERT(vfs_seek(file, elf.rw_location) == VFS_OK);
        KERNEL_ASSERT(vfs_read(file, (void *)elf.rw_vaddr, elf.rw_size)
		      == (int)elf.rw_size);
    }


    /* Set the dirty bit to zero (read-only) on read-only pages. */
    for(i = 0; i < (int)elf.ro_pages; i++) {
        vm_set_dirty(my_entry->pagetable, elf.ro_vaddr + i*PAGE_SIZE, 0);
    }

    /* Insert page mappings again to TLB to take read-only bits into use */
    //intr_status = _interrupt_disable();
    //tlb_fill(my_entry->pagetable);
    //_interrupt_set_state(intr_status);

    /* Initialize the user context. (Status register is handled by
       thread_goto_userland) */
    memoryset(&user_context, 0, sizeof(user_context));
    user_context.cpu_regs[MIPS_REGISTER_SP] = USERLAND_STACK_TOP;
    user_context.pc = elf.entry_point;

    vfs_close(file);

    thread_goto_userland(&user_context);

    KERNEL_PANIC("thread_goto_userland failed.");
}
Example #13
/** Handles an interrupt (exception code 0). All interrupt handlers
 * that are registered for any of the interrupts that occurred
 * (hardware 0-5, software 0-1) are called. The scheduler is called
 * if a timer interrupt (hardware 5) or a context switch request
 * (software interrupt 0) occurred, or if the currently running
 * thread for the processor is the idle thread.
 *
 * @param cause The Cause register from CP0
 */
void interrupt_handle(virtaddr_t cause) {
    int this_cpu, i;

    if(cause & INTERRUPT_CAUSE_SOFTWARE_0) {
        _interrupt_clear_sw0();
    }

    this_cpu = _interrupt_getcpu();

    /* Exceptions should be handled elsewhere: */
    if((cause  & 0x0000007c) != 0) {
        kprintf("Caught exception, cause %.8x, CPU %i\n", cause, this_cpu);
        KERNEL_PANIC("Exception in interrupt_handle");
    }


    /* Call the appropriate interrupt handlers. Handlers cannot be
     * unregistered, so after the first empty entry all the others
     * are also empty.
     */
    for (i=0; i<CONFIG_MAX_DEVICES; i++) {
        if (interrupt_handlers[i].device == NULL)
            break;

        /* If this handler is registered for any of the interrupts
         * that occurred, call it.
         */
        if ((cause & interrupt_handlers[i].irq) != 0)
            interrupt_handlers[i].handler(interrupt_handlers[i].device);
    }


    /* Timer interrupt (HW5) or requested context switch (SW0)
     * Also call scheduler if we're running the idle thread.
     */
    if((cause & (INTERRUPT_CAUSE_SOFTWARE_0 |
                 INTERRUPT_CAUSE_HARDWARE_5)) ||
            scheduler_current_thread[this_cpu] == IDLE_THREAD_TID) {
        scheduler_schedule();

        /* Until we have proper VM we must manually fill
           the TLB with pagetable entries before running code using
           given pagetable. Note that this method limits pagetable
           rows (possible mapping pairs) to 16 and can't be used
           with proper pagetables and VM.

           Note that if you remove this call (which you probably do when
           you implement proper VM), you must manually call _tlb_set_asid
           here. See the implementation of tlb_fill on details how to do that.
        */
        pagetable_t* pagetable = thread_get_current_thread_entry()->pagetable;

        if(pagetable == NULL)
            return;

        /* Check that the pagetable can fit into TLB. This is needed until
           we have proper VM system, because the whole pagetable must fit
           into TLB. */
        KERNEL_ASSERT(pagetable->valid_count <= (_tlb_get_maxindex()+1));

        _tlb_write(pagetable->entries, 0, pagetable->valid_count);

        /* Set ASID field in Co-Processor 0 to match thread ID so that
           only entries with the ASID of the current thread will match in
           the TLB hardware. */
        _tlb_set_asid(pagetable->ASID);
    }
}
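Handlers reach interrupt_handlers[] through registration at driver initialization. A hedged sketch of a driver registering for a hardware IRQ (the interrupt_register call and the INTERRUPT_CAUSE_HARDWARE_3 mask are assumptions, named by analogy with the constants used above):

/* Sketch: register a handler so that interrupt_handle() above will
   dispatch to it when hardware IRQ 3 is raised. */
static void mydev_irq_handler(device_t *dev)
{
    /* Acknowledge and service the device here. */
}

static void mydev_init(device_t *dev)
{
    interrupt_register(INTERRUPT_CAUSE_HARDWARE_3, mydev_irq_handler, dev);
}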
Example #14
#if KERNEL_USE_TIMEOUTS
K_BOOL Mutex_Claim( Mutex_t *pstMutex_, K_ULONG ulWaitTimeMS_ )
#else
void Mutex_Claim( Mutex_t *pstMutex_ )
#endif
{
    KERNEL_TRACE_1( STR_MUTEX_CLAIM_1, (K_USHORT)Thread_GetID( g_pstCurrent ) );

#if KERNEL_USE_TIMEOUTS
    Timer_t stTimer;
    K_BOOL bUseTimer = false;
#endif

    // Disable the scheduler while claiming the Mutex_t - we're dealing with all
    // sorts of private thread data, can't have a thread switch while messing
    // with internal data structures.
    Scheduler_SetScheduler( false );

    // Check to see if the Mutex_t is claimed or not
    if (pstMutex_->bReady != 0)
    {
        // Mutex_t isn't claimed, claim it.
        pstMutex_->bReady = 0;
        pstMutex_->ucRecurse = 0;
        pstMutex_->ucMaxPri = Thread_GetPriority( g_pstCurrent );
        pstMutex_->pstOwner = g_pstCurrent;

        Scheduler_SetScheduler( true );

#if KERNEL_USE_TIMEOUTS
        return true;
#else
        return;
#endif
    }

    // If the Mutex_t is already claimed, check to see if this is the owner thread,
    // since we allow the Mutex_t to be claimed recursively.
    if (g_pstCurrent == pstMutex_->pstOwner)
    {
        // Ensure that we haven't exceeded the maximum recursive-lock count
        KERNEL_ASSERT( (pstMutex_->ucRecurse < 255) );
        pstMutex_->ucRecurse++;

        // Increment the lock count and bail
        Scheduler_SetScheduler( true );
#if KERNEL_USE_TIMEOUTS
        return true;
#else
        return;
#endif
    }

    // The Mutex_t is claimed already - we have to block now.  Move the
    // current thread to the list of threads waiting on the Mutex_t.
#if KERNEL_USE_TIMEOUTS
    if (ulWaitTimeMS_)
    {
        Thread_SetExpired( g_pstCurrent, false );
        
        Timer_Init( &stTimer );
        Timer_Start( &stTimer, false, ulWaitTimeMS_, (TimerCallback_t)TimedMutex_Calback, (void*)pstMutex_);
        bUseTimer = true;
    }
#endif
    BlockingObject_Block( (ThreadList_t*)pstMutex_, g_pstCurrent );

    // Check if priority inheritance is necessary.  We do this in order
    // to ensure that we don't end up with priority inversions in case
    // multiple threads are waiting on the same resource.
    if(pstMutex_->ucMaxPri <= Thread_GetPriority( g_pstCurrent ) )
    {
        pstMutex_->ucMaxPri = Thread_GetPriority( g_pstCurrent );

        Thread_t *pstTemp = (Thread_t*)(LinkList_GetHead( (LinkList_t*)pstMutex_ ));
        while(pstTemp)
        {
            Thread_InheritPriority( pstTemp, pstMutex_->ucMaxPri );
            if(pstTemp == (Thread_t*)(LinkList_GetTail( (LinkList_t*)pstMutex_ )) )
            {
                break;
            }
            pstTemp = (Thread_t*)LinkListNode_GetNext( (LinkListNode_t*)pstTemp );
        }
        Thread_InheritPriority( pstMutex_->pstOwner, pstMutex_->ucMaxPri );
    }

    // Done with thread data - re-enable the scheduler
    Scheduler_SetScheduler( true );

    // Switch threads if this thread acquired the Mutex_t
    Thread_Yield();

#if KERNEL_USE_TIMEOUTS
    if (bUseTimer)
    {
        Timer_Stop( &stTimer );
        return ( Thread_GetExpired( g_pstCurrent ) == 0);
    }
    return true;
#endif
}
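A usage sketch for the claim path above, assuming the timeout build and the two-argument signature reconstructed here; Mutex_Release and the lock's initialization are assumed counterparts not shown on this page:

/* Sketch: guard a shared counter with the mutex; wait at most
   100 ms for it. Assumes KERNEL_USE_TIMEOUTS is enabled. */
static Mutex_t stLock;    /* assumed to be initialized elsewhere */
static int iSharedCount;

void worker(void)
{
    if (Mutex_Claim(&stLock, 100))
    {
        iSharedCount++;
        Mutex_Release(&stLock);
    }
}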
Example #15
File: tfs.c Project: ArvoX/osm
/**
 * Initialize trivial filesystem. Allocates 1 page of memory dynamically for
 * the filesystem data structure, the tfs data structure and the buffers
 * needed. Sets the fs_t and tfs_t fields. If initialization is successful,
 * returns a pointer to the fs_t data structure. Otherwise NULL is returned.
 *
 * @param disk Pointer to the gbd device on which the filesystem resides.
 *
 * @return Pointer to the filesystem data structure fs_t; NULL on
 * failure.
 */
fs_t * tfs_init(gbd_t *disk) 
{
    uint32_t addr;
    gbd_request_t req;
    char name[TFS_VOLUMENAME_MAX];
    fs_t *fs;
    tfs_t *tfs;
    int r;
    semaphore_t *sem;
	
    if(disk->block_size(disk) != TFS_BLOCK_SIZE)
        return NULL;

    /* check semaphore availability before memory allocation */
    sem = semaphore_create(1);
    if (sem == NULL) {
        kprintf("tfs_init: could not create a new semaphore.\n");
        return NULL;
    }
	
    addr = pagepool_get_phys_page();
    if(addr == 0) {
        semaphore_destroy(sem);
        kprintf("tfs_init: could not allocate memory.\n");
        return NULL;
    }
    addr = ADDR_PHYS_TO_KERNEL(addr);      /* transform to vm address */
	
	
    /* Assert that one page is enough */
    KERNEL_ASSERT(PAGE_SIZE >= (3*TFS_BLOCK_SIZE+sizeof(tfs_t)+sizeof(fs_t)));
    
    /* Read the header block, and make sure this is a TFS drive */
    req.block = 0;
    req.sem = NULL;
    req.buf = ADDR_KERNEL_TO_PHYS(addr);   /* disk needs physical addr */
    r = disk->read_block(disk, &req);
    if(r == 0) {
        semaphore_destroy(sem);
        pagepool_free_phys_page(ADDR_KERNEL_TO_PHYS(addr));
        kprintf("tfs_init: Error during disk read. Initialization failed.\n");
        return NULL; 
    }
	
    if(((uint32_t *)addr)[0] != TFS_MAGIC) {
        semaphore_destroy(sem);
        pagepool_free_phys_page(ADDR_KERNEL_TO_PHYS(addr));
        return NULL;
    }
	
    /* Copy volume name from header block. */
    stringcopy(name, (char *)(addr+4), TFS_VOLUMENAME_MAX);
	
    /* fs_t, tfs_t and all buffers in tfs_t fit in one page, so obtain
       addresses for each structure and buffer inside the allocated
       memory page. */
    fs  = (fs_t *)addr;
    tfs = (tfs_t *)(addr + sizeof(fs_t));
    tfs->buffer_inode = (tfs_inode_t *)((uint32_t)tfs + sizeof(tfs_t));
    tfs->buffer_bat  = (bitmap_t *)((uint32_t)tfs->buffer_inode + 
                                    TFS_BLOCK_SIZE);
    tfs->buffer_md   = (tfs_direntry_t *)((uint32_t)tfs->buffer_bat + 
                                        TFS_BLOCK_SIZE);

    tfs->totalblocks = MIN(disk->total_blocks(disk), 8*TFS_BLOCK_SIZE);
    tfs->disk        = disk;
	
    /* save the semaphore to the tfs_t */
    tfs->lock = sem;
	
    fs->internal = (void *)tfs;
    stringcopy(fs->volume_name, name, VFS_NAME_LENGTH);
	
    fs->unmount = tfs_unmount;
    fs->open    = tfs_open;
    fs->close   = tfs_close;
    fs->create  = tfs_create;
    fs->remove  = tfs_remove;
    fs->read    = tfs_read;
    fs->write   = tfs_write;
    fs->getfree  = tfs_getfree;
	
    return fs;
}
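Tying this to Example #3: once tfs_init returns a populated fs_t, it can be handed to vfs_mount. A sketch (obtaining the gbd_t handle is device-specific, and the volume name is illustrative):

/* Sketch: initialize TFS on a disk and mount it under the VFS,
   using vfs_mount from Example #3. */
void mount_tfs_volume(gbd_t *disk)
{
    fs_t *fs = tfs_init(disk);

    if (fs == NULL) {
        kprintf("TFS: initialization failed, volume not mounted\n");
        return;
    }

    if (vfs_mount(fs, "disk1") != VFS_OK)
        kprintf("VFS: mounting [disk1] failed\n");
}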
Example #16
/* Return non-zero on error. */
int setup_new_process(TID_t thread,
                      const char *executable, const char **argv_src,
                      virtaddr_t *entry_point, virtaddr_t *stack_top)
{
  pagetable_t *pagetable;
  elf_info_t elf;
  openfile_t file;
  uintptr_t phys_page;
  int i, res;
  thread_table_t *thread_entry = thread_get_thread_entry(thread);

  int argc = 1;
  virtaddr_t argv_begin;
  virtaddr_t argv_dst;
  int argv_elem_size;
  virtaddr_t argv_elem_dst;

  file = vfs_open((char *)executable);

  /* Make sure the file existed and was a valid ELF file */
  if (file < 0) {
    return -1;
  }

  res = elf_parse_header(&elf, file);
  if (res < 0) {
    vfs_close(file);
    return -1;
  }

  /* Trivial and naive sanity check for entry point: */
  if (elf.entry_point < PAGE_SIZE) {
    vfs_close(file);
    return -1;
  }

  *entry_point = elf.entry_point;

  pagetable = vm_create_pagetable(thread);

  thread_entry->pagetable = pagetable;

  /* Allocate and map stack */
  for(i = 0; i < CONFIG_USERLAND_STACK_SIZE; i++) {
    phys_page = physmem_allocblock();
    KERNEL_ASSERT(phys_page != 0);
    /* Zero the page */
    memoryset((void*)ADDR_PHYS_TO_KERNEL(phys_page), 0, PAGE_SIZE);
    vm_map(pagetable, phys_page,
           (USERLAND_STACK_TOP & PAGE_SIZE_MASK) - i*PAGE_SIZE, 1);
  }

  /* Allocate and map pages for the segments. We assume that
     segments begin at page boundary. (The linker script in tests
     directory creates this kind of segments) */
  for(i = 0; i < (int)elf.ro_pages; i++) {
    int left_to_read = elf.ro_size - i*PAGE_SIZE;
    phys_page = physmem_allocblock();
    KERNEL_ASSERT(phys_page != 0);
    /* Zero the page */
    memoryset((void*)ADDR_PHYS_TO_KERNEL(phys_page), 0, PAGE_SIZE);
    /* Fill the page from ro segment */
    if (left_to_read > 0) {
      KERNEL_ASSERT(vfs_seek(file, elf.ro_location + i*PAGE_SIZE) == VFS_OK);
      KERNEL_ASSERT(vfs_read(file, (void*)ADDR_PHYS_TO_KERNEL(phys_page),
                             MIN(PAGE_SIZE, left_to_read))
                    == (int) MIN(PAGE_SIZE, left_to_read));
    }
    vm_map(pagetable, phys_page,
           elf.ro_vaddr + i*PAGE_SIZE, 0);
  }

  for(i = 0; i < (int)elf.rw_pages; i++) {
    int left_to_read = elf.rw_size - i*PAGE_SIZE;
    phys_page = physmem_allocblock();
    KERNEL_ASSERT(phys_page != 0);
    /* Zero the page */
    memoryset((void*)ADDR_PHYS_TO_KERNEL(phys_page), 0, PAGE_SIZE);
    /* Fill the page from rw segment */
    if (left_to_read > 0) {
      KERNEL_ASSERT(vfs_seek(file, elf.rw_location + i*PAGE_SIZE) == VFS_OK);
      KERNEL_ASSERT(vfs_read(file, (void*)ADDR_PHYS_TO_KERNEL(phys_page),
                             MIN(PAGE_SIZE, left_to_read))
                    == (int) MIN(PAGE_SIZE, left_to_read));
    }
    vm_map(pagetable, phys_page,
           elf.rw_vaddr + i*PAGE_SIZE, 1);
  }

  /* Set up argc and argv on the stack. */

  /* Start by preparing ancillary information for the new process argv. */
  if (argv_src != NULL)
    for (i = 0; argv_src[i] != NULL; i++) {
      argc++;
    }

  argv_begin = USERLAND_STACK_TOP - (argc * sizeof(virtaddr_t));
  argv_dst = argv_begin;

  /* Prepare for copying executable. */
  argv_elem_size = strlen(executable) + 1;
  argv_elem_dst = argv_dst - wordpad(argv_elem_size);

  /* Copy executable to argv[0] location. */
  vm_memwrite(pagetable,
              argv_elem_size,
              argv_elem_dst,
              executable);
  /* Set argv[i] */
  vm_memwrite(pagetable,
              sizeof(virtaddr_t),
              argv_dst,
              &argv_elem_dst);

  /* Move argv_dst to &argv[1]. */
  argv_dst += sizeof(virtaddr_t);

  if (argv_src != NULL) {
    for (i = 0; argv_src[i] != NULL; i++) {
      /* Compute the size of argv[i+1] */
      argv_elem_size = strlen(argv_src[i]) + 1;
      argv_elem_dst -= wordpad(argv_elem_size);

      /* Write the 'i+1'th element of argv */
      vm_memwrite(pagetable,
                  argv_elem_size,
                  argv_elem_dst,
                  argv_src[i]);

      /* Write argv[i+1] */
      vm_memwrite(pagetable,
                  sizeof(virtaddr_t),
                  argv_dst,
                  &argv_elem_dst);

      /* Move argv_dst to next element of argv. */
      argv_dst += sizeof(virtaddr_t);
    }
  }

  /* Write argc to the stack. */
  vm_memwrite(pagetable,
              sizeof(int),
              argv_elem_dst - sizeof(int),
              &argc);
  /* Write argv to the stack. */
  vm_memwrite(pagetable,
              sizeof(virtaddr_t),
              argv_elem_dst - sizeof(int) - sizeof(virtaddr_t),
              &argv_begin);

  /* Stack pointer points at argv. */
  *stack_top = argv_elem_dst - sizeof(int) - sizeof(virtaddr_t);

  vfs_close(file);

  return 0;
}
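A caller sketch showing where the two out-parameters go, mirroring the userland entry at the end of Example #12 (the wrapper name run_process is illustrative):

/* Sketch: consume entry_point and stack_top from
   setup_new_process, then enter userland as in Example #12. */
void run_process(TID_t thread, const char *executable, const char **argv)
{
    virtaddr_t entry_point, stack_top;
    context_t user_context;

    if (setup_new_process(thread, executable, argv,
                          &entry_point, &stack_top) != 0)
        KERNEL_PANIC("process setup failed");

    memoryset(&user_context, 0, sizeof(user_context));
    user_context.cpu_regs[MIPS_REGISTER_SP] = stack_top;
    user_context.pc = entry_point;

    thread_goto_userland(&user_context);
}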
Example #17
static TID_t scheduler_remove_first_ready(void)
{
    TID_t t;

    t = scheduler_ready_to_run.head;

    /* The head has the earliest deadline as a starting point */
    TID_t i = scheduler_ready_to_run.head;

    int no_high_p = 1;
    int j = i;

    /* Make sure the earliest-deadline starting point is not a zero
       deadline (zero marks a low-priority thread with no deadline).
       If all deadlines are zero, the head is the first thread to be run */
    if (thread_table[i].deadline == 0)
    {
      while (thread_table[i].next >= 0)
      {
        TID_t i_next = thread_table[i].next;
        if (thread_table[i_next].deadline > 0)
        { 
          j = i_next;
          t = j;
          no_high_p = 0;
          /* A deadline greater than zero was found - stop searching */
          break;
        }
        i = i_next; 
      }
    }

    /* Find the thread with the earliest deadline on the ready list
       and select it as t */
    if (no_high_p == 0)
    {
      while (thread_table[j].next >= 0)
      {
        TID_t j_next = thread_table[j].next;
        if (thread_table[t].deadline > thread_table[j_next].deadline && 
            thread_table[j_next].deadline >= 1)
        {
          t = j_next;
        }
        j = j_next;   
      }
    }
  
    /* Idle thread should never be on the ready list. */
    KERNEL_ASSERT(t != IDLE_THREAD_TID);

    if(t >= 0) {
        /* Threads in the ready queue should be in state Ready */
        KERNEL_ASSERT(thread_table[t].state == THREAD_READY);

        /* Unlink t from the ready list; with deadline scheduling it
           is not necessarily the head. */
        if (scheduler_ready_to_run.head == t) {
            scheduler_ready_to_run.head = thread_table[t].next;
            if (scheduler_ready_to_run.tail == t)
                scheduler_ready_to_run.tail = -1;
        } else {
            TID_t prev = scheduler_ready_to_run.head;
            while (thread_table[prev].next != t)
                prev = thread_table[prev].next;
            thread_table[prev].next = thread_table[t].next;
            if (scheduler_ready_to_run.tail == t)
                scheduler_ready_to_run.tail = prev;
        }
    }

    if(t < 0) {
        return IDLE_THREAD_TID;
    } else {
        return t;
    }
}
Example #18
File: main.c Project: JanmanX/KUDOS
void init(void)
{
    TID_t startup_thread;
    int numcpus;

    /* Initialise Static Allocation */
    stalloc_init();

    /* Initialize polling TTY driver for kprintf() usage. */
    polltty_init();

    kwrite("Kudos is an educational operating system by the University of Copenhagen\n");
    kwrite("========================================================================\n");
    kwrite("Based on the Buenos operating system skeleton\n");
    kwrite("\n");

    kprintf("Copyright (C) 2003-2016  Juha Aatrokoski, Timo Lilja,\n");
    kprintf("  Leena Salmela, Teemu Takanen, Aleksi Virtanen, Philip Meulengracht,\n");
    kprintf("  Troels Henriksen, Annie Jane Pinder, Niels Gustav Westphal Serup,\n");
    kprintf("  Nicklas Warming Jacobsen, Oleksandr Shturmov.\n");
    kwrite("See the file COPYING for licensing details.\n");
    kwrite("\n");

    kwrite("Reading boot arguments\n");
    bootargs_init((void*)BOOT_ARGUMENT_AREA);

    /* Seed the random number generator. */
    if (bootargs_get("randomseed") == NULL) {
        _set_rand_seed(0);
    } else {
        int seed = atoi(bootargs_get("randomseed"));
        kprintf("Seeding pseudorandom number generator with %i\n", seed);
        _set_rand_seed(seed);
    }

    numcpus = cpustatus_count();
    kprintf("Detected %i CPUs\n", numcpus);
    KERNEL_ASSERT(numcpus <= CONFIG_MAX_CPUS);

    kwrite("Initializing interrupt handling\n");
    interrupt_init(numcpus);

    kwrite("Initializing threading system\n");
    thread_table_init();

    kwrite("Initializing sleep queue\n");
    sleepq_init();

    kwrite("Initializing semaphores\n");
    semaphore_init();

    kwrite("Initializing device drivers\n");
    device_init();

    kprintf("Initializing virtual filesystem\n");
    vfs_init();

    kwrite("Initializing scheduler\n");
    scheduler_init();

    kwrite("Initializing virtual memory\n");
    vm_init();

    kprintf("Creating initialization thread\n");
    startup_thread = thread_create(&init_startup_thread, 0);
    thread_run(startup_thread);

    kprintf("Starting threading system and SMP\n");

    /* Let other CPUs run */
    kernel_bootstrap_finished = 1;

    _interrupt_clear_bootstrap();
    _interrupt_enable();

    /* Enter context switch, scheduler will be run automatically,
       since thread_switch() behaviour is identical to timer tick
       (thread timeslice is over). */
    thread_switch();

    /* We should never get here */
    KERNEL_PANIC("Threading system startup failed.");
}