Code Example #1
File: request.c  Project: bend/Minix-Defrag
/*===========================================================================*
 *				req_newdriver          			     *
 *===========================================================================*/
PUBLIC int req_newdriver(
  endpoint_t fs_e,
  dev_t dev,
  endpoint_t driver_e
)
{
/* Note: this is the only request function that doesn't use the 
 * fs_sendrec internal routine, since we want to avoid the dead
 * driver recovery mechanism here. This function is actually called 
 * during the recovery.
 */
  message m;
  int r;

  /* Fill in request message */
  m.m_type = REQ_NEW_DRIVER;
  m.REQ_DEV = dev;
  m.REQ_DRIVER_E = driver_e;

  /* Issue request */
  if((r = sendrec(fs_e, &m)) != OK) {
	  printf("%s:%d VFS req_newdriver: error sending message %d to %d\n",
		 __FILE__, __LINE__, r, fs_e);
	  util_stacktrace();
	  return(r);
  }

  return(OK);
}
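As the header comment notes, fs_sendrec (not shown on this page) wraps sendrec with the dead-driver recovery mechanism, which must not be re-entered from the recovery path itself. A minimal sketch of what such a wrapper might look like, with handle_dead_driver() as a hypothetical helper:

/* Sketch only, not the actual fs_sendrec: a send/receive wrapper
 * that reacts to a failed exchange by starting driver recovery.
 * Calling this from within recovery would recurse, which is why
 * req_newdriver() above talks to the FS with plain sendrec().
 */
static int fs_sendrec_sketch(endpoint_t fs_e, message *reqmp)
{
  int r;

  if ((r = sendrec(fs_e, reqmp)) != OK) {
	/* hypothetical helper: initiate dead-driver recovery */
	handle_dead_driver(fs_e);
  }
  return r;
}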
Code Example #2
File: vnode.c  Project: DragonQuan/minix3
/*===========================================================================*
 *				put_vnode				     *
 *===========================================================================*/
PUBLIC void put_vnode(struct vnode *vp)
{
/* Decrease vnode's usage counter and decrease inode's usage counter in the
 * corresponding FS process. Decreasing the fs_count each time we decrease the
 * ref count would lead to poor performance. Instead, only decrease fs_count
 * when the ref count hits zero. However, this could cause fs_count to wrap.
 * To prevent this, we drop the counter to 1 when the counter hits 256.
 * We maintain fs_count as a sanity check to make sure VFS and the FS are in
 * sync.
 */
  int r, lock_vp;

  ASSERTVP(vp);

  /* Lock vnode. It's quite possible this thread already has a lock on this
   * vnode. That's no problem, because the reference counter will not decrease
   * to zero in that case. However, if the counter does decrease to zero *and*
   * is already locked, we have a consistency problem somewhere. */
  lock_vp = lock_vnode(vp, VNODE_OPCL);

  if (vp->v_ref_count > 1) {
	/* Decrease counter */
	vp->v_ref_count--;
	if (vp->v_fs_count > 256)
		vnode_clean_refs(vp);
	if (lock_vp != EBUSY) unlock_vnode(vp);
	return;
  }

  /* If we already had a lock, there is a consistency problem */
  assert(lock_vp != EBUSY);
  tll_upgrade(&vp->v_lock);	/* Make sure nobody else accesses this vnode */

  /* A vnode that's not in use can't be put back. */
  if (vp->v_ref_count <= 0)
	panic("put_vnode failed: bad v_ref_count %d\n", vp->v_ref_count);

  /* fs_count should indicate that the file is in use. */
  if (vp->v_fs_count <= 0)
	panic("put_vnode failed: bad v_fs_count %d\n", vp->v_fs_count);

  /* Tell FS we don't need this inode to be open anymore. */
  r = req_putnode(vp->v_fs_e, vp->v_inode_nr, vp->v_fs_count);

  if (r != OK) {
	printf("VFS: putnode failed: %d\n", r);
	util_stacktrace();
  }

  /* This inode could've been mapped. If so, tell mapped FS to close it as
   * well. If mapped onto same FS, this putnode is not needed. */
  if (vp->v_mapfs_e != NONE && vp->v_mapfs_e != vp->v_fs_e)
	req_putnode(vp->v_mapfs_e, vp->v_mapinode_nr, vp->v_mapfs_count);

  vp->v_fs_count = 0;
  vp->v_ref_count = 0;
  vp->v_mapfs_count = 0;

  unlock_vnode(vp);
}
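The wrap-prevention step mentioned in the header comment is performed by vnode_clean_refs() once v_fs_count passes 256. Its implementation is not shown on this page; here is a sketch consistent with the comment, assuming req_putnode() decreases the FS-side count by the given amount:

/* Sketch, not the actual vnode_clean_refs(): collapse the FS-side
 * reference count back to 1 so that v_fs_count can never wrap.
 */
static void vnode_clean_refs_sketch(struct vnode *vp)
{
  if (vp->v_fs_count <= 1) return;	/* nothing to collapse */

  /* Give back all but one reference in a single request. */
  if (req_putnode(vp->v_fs_e, vp->v_inode_nr, vp->v_fs_count - 1) == OK)
	vp->v_fs_count = 1;
}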
Code Example #3
File: mem_shared.c  Project: anuragpeshne/minix
static int getsrc(struct vir_region *region,
	struct vmproc **vmp, struct vir_region **r)
{
	int srcproc;

	if(region->def_memtype != &mem_type_shared) {
		printf("shared region hasn't shared type but %s.\n",
			region->def_memtype->name);
		return EINVAL;
	}

	if(!region->param.shared.ep || !region->param.shared.vaddr) {
		printf("shared region has not defined source region.\n");
		util_stacktrace();
		return EINVAL;
	}

	if(vm_isokendpt((endpoint_t) region->param.shared.ep, &srcproc) != OK) {
		printf("VM: shared memory with missing source process.\n");
		util_stacktrace();
		return EINVAL;
	}

	*vmp = &vmproc[srcproc];

	if(!(*r=map_lookup(*vmp, region->param.shared.vaddr, NULL))) {
		printf("VM: shared memory with missing vaddr 0x%lx.\n",
			region->param.shared.vaddr);
		return EINVAL;
	}

	if((*r)->def_memtype != &mem_type_anon) {
		printf("source region hasn't anon type but %s.\n",
			(*r)->def_memtype->name);
		return EINVAL;
	}

	if(region->param.shared.id != (*r)->id) {
		printf("source region has no matching id\n");
		return EINVAL;
	}

	return OK;
}
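getsrc() only validates and resolves; a hedged, illustrative caller (names invented) shows how the resulting (vmproc, region) pair would typically be used:

/* Illustrative only: resolve the source of a shared region before
 * taking page references from it. shared_copy_refs_sketch() is an
 * invented placeholder for the actual copying step.
 */
static int shared_setup_sketch(struct vir_region *region)
{
	struct vmproc *srcvmp;
	struct vir_region *srcregion;
	int r;

	if ((r = getsrc(region, &srcvmp, &srcregion)) != OK)
		return r;	/* stale endpoint, vaddr or id */

	/* srcregion is now known to be an anon region with the
	 * matching id, owned by a live process.
	 */
	return shared_copy_refs_sketch(srcvmp, srcregion, region);
}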
Code Example #4
File: main.c  Project: Sciumo/minix
/*===========================================================================*
 *				reply					     *
 *===========================================================================*/
void reply(endpoint_t whom, int result)
{
/* Send a reply to a user process.  If the send fails, just ignore it. */
  int r;

  m_out.reply_type = result;
  r = sendnb(whom, &m_out);
  if (r != OK) {
	printf("VFS: %d couldn't send reply %d to %d: %d\n", mthread_self(),
		result, whom, r);
	util_stacktrace();
  }
}
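A typical call site simply hands the syscall result to reply() and moves on; because sendnb() does not block, a wedged receiver cannot stall VFS. Illustrative usage, with who_e and err_code assumed from the surrounding VFS request context:

/* Illustrative: report the result of a completed request. A failed
 * send is only logged by reply() itself; the caller does not care.
 */
reply(who_e, err_code);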
Code Example #5
/*===========================================================================*
 *				panic				     *
 *===========================================================================*/
void panic(const char *fmt, ...)
{
/* Something awful has happened. Panics are caused when an internal
 * inconsistency is detected, e.g., a programming error or illegal 
 * value of a defined constant.
 */
  endpoint_t me = NONE;
  char name[20];
  int priv_flags;
  int init_flags;
  void (*suicide)(void);
  va_list args;

  if(sys_whoami(&me, name, sizeof(name), &priv_flags, &init_flags) == OK && me != NONE)
	printf("%s(%d): panic: ", name, me);
  else
	printf("(sys_whoami failed): panic: ");

  if(fmt) {
	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
  } else {
	printf("no message\n");
  }
  printf("\n");

  printf("syslib:panic.c: stacktrace: ");
  util_stacktrace();

  panic_hook();

  /* Try exit */
  _exit(1);

  /* Try to signal ourselves */
  abort();

  /* If exiting nicely through PM fails for some reason, try to
   * commit suicide. E.g., message to PM might fail due to deadlock.
   */
  suicide = (void (*)(void)) -1;
  suicide();

  /* If committing suicide fails for some reason, hang. */
  for(;;) { }
}
Code Example #6
File: utility.c  Project: 7shi/minix-tools
/*===========================================================================*
 *			minix_panic                                        *
 *===========================================================================*/
PUBLIC void minix_panic(char *mess,int nr)
{
    /* The system has run aground on a fatal kernel error. Terminate execution. */
    if (minix_panicing++) {
        arch_monitor();
    }

    if (mess != NULL) {
        kprintf("kernel panic: %s", mess);
        if(nr != NO_NUM)
            kprintf(" %d", nr);
        kprintf("\n");
    }

    kprintf("kernel: ");
    util_stacktrace();

    /* Abort MINIX. */
    minix_shutdown(NULL);
}
Code Example #7
File: panic.c  Project: locosoft1986/nucleos
void kernel_panic(char *mess, int nr)
{
	/* But what if we are already in panic? */
	kernel_in_panic++;

	if (mess != NULL) {
		printk("kernel panic: %s", mess);

		if(nr != NO_NUM)
			printk(" %d", nr);

		printk("\n");
	}

	printk("kernel: ");
	util_stacktrace();

	/* Abort Nucleos. */
	nucleos_shutdown(RBT_HALT);
}
Code Example #8
File: utility.c  Project: biswajit1983/minix-nbsd
/*===========================================================================*
 *			panic                                          *
 *===========================================================================*/
PUBLIC void panic(const char *fmt, ...)
{
  va_list arg;
  /* The system has run aground on a fatal kernel error. Terminate execution. */
  if (minix_panicing == ARE_PANICING) {
	arch_monitor();
  }
  minix_panicing = ARE_PANICING;
  if (fmt != NULL) {
	printf("kernel panic: ");
  	va_start(arg, fmt);
	vprintf(fmt, arg);
	printf("\n");
  }

  printf("kernel: ");
  util_stacktrace();

  /* Abort MINIX. */
  minix_shutdown(NULL);
}
Code Example #9
File: comm.c  Project: Hooman3/minix
/*===========================================================================*
 *				sendmsg					     *
 *===========================================================================*/
static int sendmsg(struct vmnt *vmp, endpoint_t dst, struct worker_thread *wp)
{
/* This is the low level function that sends requests.
 * Currently to FSes or VM.
 */
  int r, transid;

  if(vmp) vmp->m_comm.c_cur_reqs++;	/* One more request awaiting a reply */
  transid = wp->w_tid + VFS_TRANSID;
  wp->w_sendrec->m_type = TRNS_ADD_ID(wp->w_sendrec->m_type, transid);
  wp->w_task = dst;
  if ((r = asynsend3(dst, wp->w_sendrec, AMF_NOREPLY)) != OK) {
	printf("VFS: sendmsg: error sending message. "
		"dest: %d req_nr: %d err: %d\n", dst,
			wp->w_sendrec->m_type, r);
	util_stacktrace();
	return(r);
  }

  return(r);
}
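TRNS_ADD_ID() embeds the per-worker transaction id into the message type so the asynchronous reply can later be matched to the waiting thread. One plausible encoding, not necessarily MINIX's actual macro definitions:

/* Sketch of a transaction-id encoding: pack the id next to the
 * request type so the reply carries it back unchanged. These are
 * assumed definitions, not copied from the MINIX headers.
 */
#define TRNS_ADD_ID_SKETCH(t, i)	(((t) << 16) | ((i) & 0xffff))
#define TRNS_GET_ID_SKETCH(t)		((t) & 0xffff)
#define TRNS_DEL_ID_SKETCH(t)		((t) >> 16)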
Code Example #10
File: comm.c  Project: Hooman3/minix
/*===========================================================================*
 *				drv_sendrec				     *
 *===========================================================================*/
int drv_sendrec(endpoint_t drv_e, message *reqmp)
{
	int r;
	struct dmap *dp;

	/* For the CTTY_MAJOR case, we would actually have to lock the device
	 * entry being redirected to.  However, the CTTY major only hosts a
	 * character device while this function is used only for block devices.
	 * Thus, we can simply deny the request immediately.
	 */
	if (drv_e == CTTY_ENDPT) {
		printf("VFS: /dev/tty is not a block device!\n");
		return EIO;
	}

	if ((dp = get_dmap(drv_e)) == NULL)
		panic("driver endpoint %d invalid", drv_e);

	lock_dmap(dp);
	if (dp->dmap_servicing != INVALID_THREAD)
		panic("driver locking inconsistency");
	dp->dmap_servicing = self->w_tid;
	self->w_task = drv_e;
	self->w_drv_sendrec = reqmp;

	if ((r = asynsend3(drv_e, self->w_drv_sendrec, AMF_NOREPLY)) == OK) {
		/* Yield execution until we've received the reply */
		worker_wait();
	} else {
		printf("VFS: drv_sendrec: error sending msg to driver %d: %d\n",
			drv_e, r);
		util_stacktrace();
	}

	dp->dmap_servicing = INVALID_THREAD;
	self->w_task = NONE;
	self->w_drv_sendrec = NULL;
	unlock_dmap(dp);
	return(OK);
}
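The worker_wait() call parks the thread until the driver's asynchronous reply arrives; the reply path presumably locates the waiting worker through dmap_servicing and wakes it. A hedged sketch of that path, with worker_get() as an invented lookup:

/* Illustrative reply path, not actual VFS code: deliver a block
 * driver's reply to the worker recorded in the dmap entry and
 * resume it, completing the rendezvous started in drv_sendrec().
 */
static void drv_reply_sketch(struct dmap *dp, message *replymp)
{
	struct worker_thread *wp;

	if (dp->dmap_servicing == INVALID_THREAD)
		return;				/* nobody is waiting */

	wp = worker_get(dp->dmap_servicing);	/* invented lookup */
	*wp->w_drv_sendrec = *replymp;		/* hand over the reply */
	worker_signal(wp);			/* wakes worker_wait() */
}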
Code Example #11
File: arch_system.c  Project: bdeepak77/minix3
void arch_proc_setcontext(struct proc *p, struct stackframe_s *state,
	int isuser, int trap_style)
{
	if(isuser) {
		/* Restore user bits of psw from sc, maintain system bits
		 * from proc.
		 */
		state->psw  =  (state->psw & X86_FLAGS_USER) |
			(p->p_reg.psw & ~X86_FLAGS_USER);
	}

	/* someone wants to totally re-initialize process state */
	assert(sizeof(p->p_reg) == sizeof(*state));
	memcpy(&p->p_reg, state, sizeof(*state));

	/* further code is instructed to not touch the context
	 * any more
	 */
	p->p_misc_flags |= MF_CONTEXT_SET;

	/* on x86 this requires returning using iret (KTS_INT)
	 * so that the full context is restored instead of relying on
	 * the userspace doing it (as it would do on SYSEXIT).
	 * as ESP and EIP are also reset, userspace won't try to
	 * restore bogus context after returning.
	 *
	 * if the process is not blocked, or the kernel will ignore
	 * our trap style, we needn't panic but things will probably
	 * not go well for the process (restored context will be ignored)
	 * and the situation should be debugged.
	 */
	if(!(p->p_rts_flags)) {
		printf("WARNINIG: setting full context of runnable process\n");
		print_proc(p);
		util_stacktrace();
	}
	if(p->p_seg.p_kern_trap_style == KTS_NONE)
		printf("WARNINIG: setting full context of out-of-kernel process\n");
	p->p_seg.p_kern_trap_style = trap_style;
}
Code Example #12
File: alloc.c  Project: anuragpeshne/minix
/*===========================================================================*
 *				usedpages_add				     *
 *===========================================================================*/
int usedpages_add_f(phys_bytes addr, phys_bytes len, char *file, int line)
{
	u32_t pagestart, pages;

	if(!incheck)
		return OK;

	assert(!(addr % VM_PAGE_SIZE));
	assert(!(len % VM_PAGE_SIZE));
	assert(len > 0);

	pagestart = addr / VM_PAGE_SIZE;
	pages = len / VM_PAGE_SIZE;

	while(pages > 0) {
		phys_bytes thisaddr;
		assert(pagestart > 0);
		assert(pagestart < NUMBER_PHYSICAL_PAGES);
		thisaddr = pagestart * VM_PAGE_SIZE;
		assert(pagestart >= 0);
		assert(pagestart < NUMBER_PHYSICAL_PAGES);
		if(pagemap[pagestart].used) {
			static int warnings = 0;
			if(warnings++ < 100)
				printf("%s:%d: usedpages_add: addr 0x%lx reused, first %s:%d\n",
					file, line, thisaddr, pagemap[pagestart].file, pagemap[pagestart].line);
			util_stacktrace();
			return EFAULT;
		}
		pagemap[pagestart].used = 1;
		pagemap[pagestart].file = file;
		pagemap[pagestart].line = line;
		pages--;
		pagestart++;
	}

	return OK;
}
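The _f suffix plus the trailing file/line parameters strongly suggest the function is invoked through a call-site-recording macro, a common C idiom. A sketch of such a wrapper (the exact MINIX definition may differ):

/* Conventional wrapper so every caller records its own location
 * automatically; pagemap[] can then name the first user of a page.
 */
#define usedpages_add(addr, len) \
	usedpages_add_f((addr), (len), __FILE__, __LINE__)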
Code Example #13
File: utility.c  Project: kemurphy/minix
/*===========================================================================*
 *			panic                                          *
 *===========================================================================*/
PUBLIC void panic(const char *fmt, ...)
{
  va_list arg;
  /* The system has run aground on a fatal kernel error. Terminate execution. */
  if (minix_panicing == ARE_PANICING) {
  	reset();
  }
  minix_panicing = ARE_PANICING;
  if (fmt != NULL) {
	printf("kernel panic: ");
  	va_start(arg, fmt);
	vprintf(fmt, arg);
	printf("\n");
  }

  printf("kernel on CPU %d: ", cpuid);
  util_stacktrace();

  printf("current process : ");
  proc_stacktrace(get_cpulocal_var(proc_ptr));

  /* Abort MINIX. */
  minix_shutdown(NULL);
}
Code Example #14
/*===========================================================================*
 *				vm_allocpages		     		     *
 *===========================================================================*/
void *vm_allocpages(phys_bytes *phys, int reason, int pages)
{
    /* Allocate a page for use by VM itself. */
    phys_bytes newpage;
    vir_bytes loc;
    pt_t *pt;
    int r;
    static int level = 0;
    void *ret;
    u32_t mem_flags = 0;

    pt = &vmprocess->vm_pt;
    assert(reason >= 0 && reason < VMP_CATEGORIES);

    assert(pages > 0);

    level++;

    assert(level >= 1);
    assert(level <= 2);

    if((level > 1) || !pt_init_done) {
        void *s;

        if(pages == 1) s=vm_getsparepage(phys);
        else if(pages == 4) s=vm_getsparepagedir(phys);
        else panic("%d pages", pages);

        level--;
        if(!s) {
            util_stacktrace();
            printf("VM: warning: out of spare pages\n");
        }
        if(!is_staticaddr(s)) vm_self_pages++;
        return s;
    }

#if defined(__arm__)
    if (reason == VMP_PAGEDIR) {
        mem_flags |= PAF_ALIGN16K;
    }
#endif

    /* VM does have a pagetable, so get a page and map it in there.
     * Where in our virtual address space can we put it?
     */
    loc = findhole(pages);
    if(loc == NO_MEM) {
        level--;
        printf("VM: vm_allocpage: findhole failed\n");
        return NULL;
    }

    /* Allocate page of memory for use by VM. As VM
     * is trusted, we don't have to pre-clear it.
     */
    if((newpage = alloc_mem(pages, mem_flags)) == NO_MEM) {
        level--;
        printf("VM: vm_allocpage: alloc_mem failed\n");
        return NULL;
    }

    *phys = CLICK2ABS(newpage);

    /* Map this page into our address space. */
    if((r=pt_writemap(vmprocess, pt, loc, *phys, VM_PAGE_SIZE*pages,
                      ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW
#if defined(__arm__)
                      | ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE
#endif
                      , 0)) != OK) {
        free_mem(newpage, pages);
        printf("vm_allocpage writemap failed\n");
        level--;
        return NULL;
    }

    if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
        panic("VMCTL_FLUSHTLB failed: %d", r);
    }

    level--;

    /* Return user-space-ready pointer to it. */
    ret = (void *) loc;

    vm_self_pages++;
    return ret;
}
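The static level counter bounds re-entry at depth two: if mapping the fresh page itself needs a page-table page, the nested call is served from a pre-allocated spare pool (vm_getsparepage/vm_getsparepagedir) rather than recursing again. A sketch of such a pool, with invented names and sizing:

/* Illustrative spare-page pool; names, size and layout are invented.
 * Pages are stocked while normal allocation works and handed out
 * when vm_allocpages() cannot safely recurse into itself.
 */
#define SPARE_PAGES_SKETCH 8

static struct {
	void *page;		/* mapped address, NULL if slot empty */
	phys_bytes phys;	/* matching physical address */
} spares_sketch[SPARE_PAGES_SKETCH];

static void *vm_getsparepage_sketch(phys_bytes *phys)
{
	int i;

	for (i = 0; i < SPARE_PAGES_SKETCH; i++) {
		if (spares_sketch[i].page != NULL) {
			void *p = spares_sketch[i].page;
			*phys = spares_sketch[i].phys;
			spares_sketch[i].page = NULL;
			return p;
		}
	}
	return NULL;		/* out of spare pages */
}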
Code Example #15
File: cache.c  Project: wieck/minix
/*===========================================================================*
 *				lmfs_get_block_ino			     *
 *===========================================================================*/
struct buf *lmfs_get_block_ino(dev_t dev, block_t block, int only_search,
	ino_t ino, u64_t ino_off)
{
/* Check to see if the requested block is in the block cache.  If so, return
 * a pointer to it.  If not, evict some other block and fetch it (unless
 * 'only_search' is 1).  All the blocks in the cache that are not in use
 * are linked together in a chain, with 'front' pointing to the least recently
 * used block and 'rear' to the most recently used block.  If 'only_search' is
 * 1, the block being requested will be overwritten in its entirety, so it is
 * only necessary to see if it is in the cache; if it is not, any free buffer
 * will do.  It is not necessary to actually read the block in from disk.
 * If 'only_search' is PREFETCH, the block need not be read from the disk,
 * and the device is not to be marked on the block, so callers can tell if
 * the block returned is valid.
 * In addition to the LRU chain, there is also a hash chain to link together
 * blocks whose block numbers end with the same bit strings, for fast lookup.
 */

  int b;
  static struct buf *bp;
  u64_t dev_off = (u64_t) block * fs_block_size;
  struct buf *prev_ptr;

  assert(buf_hash);
  assert(buf);
  assert(nr_bufs > 0);

  ASSERT(fs_block_size > 0);

  assert(dev != NO_DEV);

  if((ino_off % fs_block_size)) {
	printf("cache: unaligned lmfs_get_block_ino ino_off %llu\n",
		ino_off);
  	util_stacktrace();
  }

  /* Search the hash chain for (dev, block). */
  b = BUFHASH(block);
  bp = buf_hash[b];
  while (bp != NULL) {
  	if (bp->lmfs_blocknr == block && bp->lmfs_dev == dev) {
  		if(bp->lmfs_flags & VMMC_EVICTED) {
  			/* We had it but VM evicted it; invalidate it. */
  			ASSERT(bp->lmfs_count == 0);
  			ASSERT(!(bp->lmfs_flags & VMMC_BLOCK_LOCKED));
  			ASSERT(!(bp->lmfs_flags & VMMC_DIRTY));
  			bp->lmfs_dev = NO_DEV;
  			bp->lmfs_bytes = 0;
  			bp->data = NULL;
  			break;
  		}
  		ASSERT(bp->lmfs_needsetcache == 0);
  		/* Block needed has been found. */
  		if (bp->lmfs_count == 0) {
			rm_lru(bp);
  			ASSERT(!(bp->lmfs_flags & VMMC_BLOCK_LOCKED));
			bp->lmfs_flags |= VMMC_BLOCK_LOCKED;
		}
		raisecount(bp);
  		ASSERT(bp->lmfs_bytes == fs_block_size);
  		ASSERT(bp->lmfs_dev == dev);
  		ASSERT(bp->lmfs_dev != NO_DEV);
 		ASSERT(bp->lmfs_flags & VMMC_BLOCK_LOCKED);
  		ASSERT(bp->data);

		if(ino != VMC_NO_INODE) {
			if(bp->lmfs_inode == VMC_NO_INODE
			|| bp->lmfs_inode != ino
			|| bp->lmfs_inode_offset != ino_off) {
				bp->lmfs_inode = ino;
				bp->lmfs_inode_offset = ino_off;
				bp->lmfs_needsetcache = 1;
			}
		}

  		return(bp);
  	} else {
  		/* This block is not the one sought. */
  		bp = bp->lmfs_hash; /* move to next block on hash chain */
  	}
  }

  /* Desired block is not on available chain. Find a free block to use. */
  if(bp) {
  	ASSERT(bp->lmfs_flags & VMMC_EVICTED);
  } else {
	if ((bp = front) == NULL) panic("all buffers in use: %d", nr_bufs);
  }
  assert(bp);

  rm_lru(bp);

  /* Remove the block that was just taken from its hash chain. */
  b = BUFHASH(bp->lmfs_blocknr);
  prev_ptr = buf_hash[b];
  if (prev_ptr == bp) {
	buf_hash[b] = bp->lmfs_hash;
  } else {
	/* The block just taken is not on the front of its hash chain. */
	while (prev_ptr->lmfs_hash != NULL)
		if (prev_ptr->lmfs_hash == bp) {
			prev_ptr->lmfs_hash = bp->lmfs_hash;	/* found it */
			break;
		} else {
			prev_ptr = prev_ptr->lmfs_hash;	/* keep looking */
		}
  }

  freeblock(bp);

  bp->lmfs_inode = ino;
  bp->lmfs_inode_offset = ino_off;

  bp->lmfs_flags = VMMC_BLOCK_LOCKED;
  bp->lmfs_needsetcache = 0;
  bp->lmfs_dev = dev;		/* fill in device number */
  bp->lmfs_blocknr = block;	/* fill in block number */
  ASSERT(bp->lmfs_count == 0);
  raisecount(bp);
  b = BUFHASH(bp->lmfs_blocknr);
  bp->lmfs_hash = buf_hash[b];

  buf_hash[b] = bp;		/* add to hash list */

  assert(dev != NO_DEV);

  /* Block is not found in our cache, but we do want it
   * if it's in the vm cache.
   */
  assert(!bp->data);
  assert(!bp->lmfs_bytes);
  if(vmcache) {
	if((bp->data = vm_map_cacheblock(dev, dev_off, ino, ino_off,
		&bp->lmfs_flags, fs_block_size)) != MAP_FAILED) {
		bp->lmfs_bytes = fs_block_size;
		ASSERT(!bp->lmfs_needsetcache);
		return bp;
	}
  }
  bp->data = NULL;

  /* Not in the cache; reserve memory for its contents. */

  lmfs_alloc_block(bp);

  assert(bp->data);

  if(only_search == PREFETCH) {
	/* PREFETCH: don't do i/o. */
	bp->lmfs_dev = NO_DEV;
  } else if (only_search == NORMAL) {
	read_block(bp);
  } else if(only_search == NO_READ) {
  	/* This block will be overwritten by new contents. */
  } else
	panic("unexpected only_search value: %d", only_search);

  assert(bp->data);

  return(bp);			/* return the newly acquired block */
}
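The header comment describes the hash as grouping blocks "whose block numbers end with the same bit strings", i.e. BUFHASH() keys on the low bits of the block number. A sketch consistent with that wording (the actual macro may use a modulo instead), assuming a power-of-two table size:

/* Assumed definition, consistent with the comment above but not
 * copied from the source: index the hash table by the low bits of
 * the block number. nr_buf_hash is assumed to be a power of two.
 */
#define BUFHASH_SKETCH(b)	((b) & (nr_buf_hash - 1))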
Code Example #16
/*===========================================================================*
 *				get_block_ino				     *
 *===========================================================================*/
static int get_block_ino(struct buf **bpp, dev_t dev, block64_t block, int how,
	ino_t ino, u64_t ino_off, size_t block_size)
{
/* Check to see if the requested block is in the block cache.  The requested
 * block is identified by the block number in 'block' on device 'dev', counted
 * in the file system block size.  The amount of data requested for this block
 * is given in 'block_size', which may be less than the file system block size
 * iff the requested block is the last (partial) block on a device.  Note that
 * the given block size does *not* affect the conversion of 'block' to a byte
 * offset!  Either way, if the block could be obtained, either from the cache
 * or by reading from the device, return OK, with a pointer to the buffer
 * structure stored in 'bpp'.  If not, return a negative error code (and no
 * buffer).  If necessary, evict some other block and fetch the contents from
 * disk (if 'how' is NORMAL).  If 'how' is NO_READ, the caller intends to
 * overwrite the requested block in its entirety, so it is only necessary to
 * see if it is in the cache; if it is not, any free buffer will do.  If 'how'
 * is PEEK, the function returns the block if it is in the cache or the VM
 * cache, and an ENOENT error code otherwise.
 * In addition to the LRU chain, there is also a hash chain to link together
 * blocks whose block numbers end with the same bit strings, for fast lookup.
 */
  int b, r;
  static struct buf *bp;
  uint64_t dev_off;
  struct buf *prev_ptr;

  assert(buf_hash);
  assert(buf);
  assert(nr_bufs > 0);

  ASSERT(fs_block_size > 0);

  assert(dev != NO_DEV);

  assert(block <= UINT64_MAX / fs_block_size);

  dev_off = block * fs_block_size;

  if((ino_off % fs_block_size)) {
	printf("cache: unaligned lmfs_get_block_ino ino_off %llu\n",
		ino_off);
  	util_stacktrace();
  }

  /* See if the block is in the cache. If so, we can return it right away. */
  bp = find_block(dev, block);
  if (bp != NULL && !(bp->lmfs_flags & VMMC_EVICTED)) {
	ASSERT(bp->lmfs_dev == dev);
	ASSERT(bp->lmfs_dev != NO_DEV);

	/* The block must have exactly the requested number of bytes. */
	if (bp->lmfs_bytes != block_size)
		return EIO;

	/* Block needed has been found. */
	if (bp->lmfs_count == 0) {
		rm_lru(bp);
		ASSERT(bp->lmfs_needsetcache == 0);
		ASSERT(!(bp->lmfs_flags & VMMC_BLOCK_LOCKED));
		/* FIXME: race condition against the VMMC_EVICTED check */
		bp->lmfs_flags |= VMMC_BLOCK_LOCKED;
	}
	raisecount(bp);
	ASSERT(bp->lmfs_flags & VMMC_BLOCK_LOCKED);
	ASSERT(bp->data);

	if(ino != VMC_NO_INODE) {
		if(bp->lmfs_inode == VMC_NO_INODE
		|| bp->lmfs_inode != ino
		|| bp->lmfs_inode_offset != ino_off) {
			bp->lmfs_inode = ino;
			bp->lmfs_inode_offset = ino_off;
			bp->lmfs_needsetcache = 1;
		}
	}

	*bpp = bp;
	return OK;
  }

  /* We had the block in the cache but VM evicted it; invalidate it. */
  if (bp != NULL) {
	assert(bp->lmfs_flags & VMMC_EVICTED);
	ASSERT(bp->lmfs_count == 0);
	ASSERT(!(bp->lmfs_flags & VMMC_BLOCK_LOCKED));
	ASSERT(!(bp->lmfs_flags & VMMC_DIRTY));
	bp->lmfs_dev = NO_DEV;
	bp->lmfs_bytes = 0;
	bp->data = NULL;
  }

  /* Desired block is not on available chain. Find a free block to use. */
  if(bp) {
  	ASSERT(bp->lmfs_flags & VMMC_EVICTED);
  } else {
	if ((bp = front) == NULL) panic("all buffers in use: %d", nr_bufs);
  }
  assert(bp);

  rm_lru(bp);

  /* Remove the block that was just taken from its hash chain. */
  b = BUFHASH(bp->lmfs_blocknr);
  prev_ptr = buf_hash[b];
  if (prev_ptr == bp) {
	buf_hash[b] = bp->lmfs_hash;
  } else {
	/* The block just taken is not on the front of its hash chain. */
	while (prev_ptr->lmfs_hash != NULL)
		if (prev_ptr->lmfs_hash == bp) {
			prev_ptr->lmfs_hash = bp->lmfs_hash;	/* found it */
			break;
		} else {
			prev_ptr = prev_ptr->lmfs_hash;	/* keep looking */
		}
  }

  freeblock(bp);

  bp->lmfs_inode = ino;
  bp->lmfs_inode_offset = ino_off;

  bp->lmfs_flags = VMMC_BLOCK_LOCKED;
  bp->lmfs_needsetcache = 0;
  bp->lmfs_dev = dev;		/* fill in device number */
  bp->lmfs_blocknr = block;	/* fill in block number */
  ASSERT(bp->lmfs_count == 0);
  raisecount(bp);
  b = BUFHASH(bp->lmfs_blocknr);
  bp->lmfs_hash = buf_hash[b];

  buf_hash[b] = bp;		/* add to hash list */

  assert(dev != NO_DEV);

  /* The block is not found in our cache, but we do want it if it's in the VM
   * cache. The exception is NO_READ, purely for context switching performance
   * reasons. NO_READ is used for 1) newly allocated blocks, 2) blocks being
   * prefetched, and 3) blocks about to be fully overwritten. In the first two
   * cases, VM will not have the block in its cache anyway, and for the third
   * we save on one VM call only if the block is in the VM cache.
   */
  assert(!bp->data);
  assert(!bp->lmfs_bytes);
  if (how != NO_READ && vmcache) {
	if((bp->data = vm_map_cacheblock(dev, dev_off, ino, ino_off,
	    &bp->lmfs_flags, roundup(block_size, PAGE_SIZE))) != MAP_FAILED) {
		bp->lmfs_bytes = block_size;
		ASSERT(!bp->lmfs_needsetcache);
		*bpp = bp;
		return OK;
	}
  }
  bp->data = NULL;

  /* The block is not in the cache, and VM does not know about it. If we were
   * requested to search for the block only, we can now return failure to the
   * caller. Return the block to the pool without allocating data pages, since
   * these would be freed upon recycling the block anyway.
   */
  if (how == PEEK) {
	bp->lmfs_dev = NO_DEV;

	put_block(bp, ONE_SHOT);

	return ENOENT;
  }

  /* Not in the cache; reserve memory for its contents. */

  lmfs_alloc_block(bp, block_size);

  assert(bp->data);

  if (how == NORMAL) {
	/* Try to read the block. Return an error code on failure. */
	if ((r = read_block(bp, block_size)) != OK) {
		put_block(bp, 0);

		return r;
	}
  } else if(how == NO_READ) {
  	/* This block will be overwritten by new contents. */
  } else
	panic("unexpected 'how' value: %d", how);

  assert(bp->data);

  *bpp = bp;			/* return the newly acquired block */
  return OK;
}
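Unlike lmfs_get_block_ino() in the previous example, this variant reports trouble through a return code instead of panicking, and the 'how' mode replaces the old only_search values. An illustrative caller showing two of the modes:

/* Illustrative caller: fetch a block, skipping disk i/o when the
 * caller will overwrite the whole block anyway. Uses only modes and
 * constants that appear in the function above.
 */
static int example_get_sketch(dev_t dev, block64_t block,
	size_t block_size, int overwrite, struct buf **bpp)
{
	return get_block_ino(bpp, dev, block,
		overwrite ? NO_READ : NORMAL,
		VMC_NO_INODE, 0, block_size);
}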
Code Example #17
/*===========================================================================*
 *				vm_allocpages		     		     *
 *===========================================================================*/
void *vm_allocpages(phys_bytes *phys, int reason, int pages)
{
    /* Allocate a page for use by VM itself. */
    phys_bytes newpage;
    static int level = 0;
    void *ret;
    u32_t mem_flags = 0;

    assert(reason >= 0 && reason < VMP_CATEGORIES);

    assert(pages > 0);

    level++;

    assert(level >= 1);
    assert(level <= 2);

    if((level > 1) || !pt_init_done) {
        void *s;

        if(pages == 1) s=vm_getsparepage(phys);
        else if(pages == 4) s=vm_getsparepagedir(phys);
        else panic("%d pages", pages);

        level--;
        if(!s) {
            util_stacktrace();
            printf("VM: warning: out of spare pages\n");
        }
        if(!is_staticaddr(s)) vm_self_pages++;
        return s;
    }

#if defined(__arm__)
    if (reason == VMP_PAGEDIR) {
        mem_flags |= PAF_ALIGN16K;
    }
#endif

    /* Allocate page of memory for use by VM. As VM
     * is trusted, we don't have to pre-clear it.
     */
    if((newpage = alloc_mem(pages, mem_flags)) == NO_MEM) {
        level--;
        printf("VM: vm_allocpage: alloc_mem failed\n");
        return NULL;
    }

    *phys = CLICK2ABS(newpage);

    if(!(ret = vm_mappages(*phys, pages))) {
        level--;
        printf("VM: vm_allocpage: vm_mappages failed\n");
        return NULL;
    }

    level--;
    vm_self_pages++;

    return ret;
}
Code Example #18
File: pagetable.c  Project: mwilbur/minix
/*===========================================================================*
 *				vm_allocpage		     		     *
 *===========================================================================*/
PUBLIC void *vm_allocpage(phys_bytes *phys, int reason)
{
/* Allocate a page for use by VM itself. */
	phys_bytes newpage;
	vir_bytes loc;
	pt_t *pt;
	int r;
	static int level = 0;
	void *ret;

	pt = &vmprocess->vm_pt;
	assert(reason >= 0 && reason < VMP_CATEGORIES);

	level++;

	assert(level >= 1);
	assert(level <= 2);

	if(level > 1 || !(vmprocess->vm_flags & VMF_HASPT) || !meminit_done) {
		int r;
		void *s;
		s=vm_getsparepage(phys);
		level--;
		if(!s) {
			util_stacktrace();
			printf("VM: warning: out of spare pages\n");
		}
		return s;
	}

	/* VM does have a pagetable, so get a page and map it in there.
	 * Where in our virtual address space can we put it?
	 */
	loc = findhole(pt,  arch_vir2map(vmprocess, vmprocess->vm_stacktop),
		vmprocess->vm_arch.vm_data_top);
	if(loc == NO_MEM) {
		level--;
		printf("VM: vm_allocpage: findhole failed\n");
		return NULL;
	}

	/* Allocate page of memory for use by VM. As VM
	 * is trusted, we don't have to pre-clear it.
	 */
	if((newpage = alloc_mem(CLICKSPERPAGE, 0)) == NO_MEM) {
		level--;
		printf("VM: vm_allocpage: alloc_mem failed\n");
		return NULL;
	}

	*phys = CLICK2ABS(newpage);

	/* Map this page into our address space. */
	if((r=pt_writemap(vmprocess, pt, loc, *phys, I386_PAGE_SIZE,
		I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE, 0)) != OK) {
		free_mem(newpage, CLICKSPERPAGE);
		printf("vm_allocpage writemap failed\n");
		level--;
		return NULL;
	}

	if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed: %d", r);
	}

	level--;

	/* Return user-space-ready pointer to it. */
	ret = (void *) arch_map2vir(vmprocess, loc);

	return ret;
}