/*===========================================================================*
 *				adjust_proc_slot			     *
 *===========================================================================*/
static void adjust_proc_slot(struct proc *rp, struct proc *from_rp)
{
  /* Copy the identity, IPC and scheduling state of 'from_rp' into the
   * process slot 'rp', so that 'rp' keeps acting as the same system process.
   * NOTE(review): presumably called during live update / state transfer when
   * a new process instance takes over an existing slot — confirm with caller.
   */

  /* Preserve endpoints, slot numbers, priv structure, and IPC. */
  rp->p_endpoint = from_rp->p_endpoint;
  rp->p_nr = from_rp->p_nr;
  /* The priv pointer must be installed before priv(rp) is dereferenced on
   * the next line; do not reorder these two statements. */
  rp->p_priv = from_rp->p_priv;
  priv(rp)->s_proc_nr = from_rp->p_nr;
  /* Keep the queue of processes waiting to send to this slot intact. */
  rp->p_caller_q = from_rp->p_caller_q;

  /* preserve scheduling */
  rp->p_scheduler = from_rp->p_scheduler;
#ifdef CONFIG_SMP
  /* On SMP, also keep the CPU the process runs on and its CPU affinity mask. */
  rp->p_cpu = from_rp->p_cpu;
  memcpy(rp->p_cpu_mask, from_rp->p_cpu_mask,
	sizeof(bitchunk_t) * BITMAP_CHUNKS(CONFIG_MAX_CPUS));
#endif
}
/*===========================================================================*
 *				lmfs_prefetch				     *
 *===========================================================================*/
void lmfs_prefetch(dev_t dev, const block64_t *blockset, unsigned int nblocks)
{
/* The given set of blocks is expected to be needed soon, so prefetch a
 * convenient subset. The blocks are expected to be sorted by likelihood of
 * being accessed soon, making the first block of the set the most important
 * block to prefetch right now. The caller must have made sure that the blocks
 * are not in the cache already. The array may have duplicate block numbers.
 */
  bitchunk_t blocks_before[BITMAP_CHUNKS(LMFS_MAX_PREFETCH)];
  bitchunk_t blocks_after[BITMAP_CHUNKS(LMFS_MAX_PREFETCH)];
  block64_t block, base_block;
  unsigned int i, bit, nr_before, nr_after, span, limit, nr_blocks;

  if (nblocks == 0)
	return;

  /* Here is the deal. We are going to prefetch one range only, because seeking
   * is too expensive for just prefetching. The range we select should at least
   * include the first ("base") block of the given set, since that is the block
   * the caller is primarily interested in. Thus, the rest of the range is
   * going to have to be directly around this base block. We first check which
   * blocks from the set fall just before and after the base block, which then
   * allows us to construct a contiguous range of desired blocks directly
   * around the base block, in O(n) time. As a natural part of this, we ignore
   * duplicate blocks in the given set. We then read from the beginning of this
   * range, in order to maximize the chance that a next prefetch request will
   * continue from the last disk position without requiring a seek. However, we
   * do correct for the maximum number of blocks we can (or should) read in at
   * once, such that we will still end up reading the base block.
   */
  base_block = blockset[0];

  memset(blocks_before, 0, sizeof(blocks_before));
  memset(blocks_after, 0, sizeof(blocks_after));

  for (i = 1; i < nblocks; i++) {
	block = blockset[i];

	/* Compute distances as differences of the already-ordered operands,
	 * so the unsigned arithmetic cannot wrap around. The previous forms
	 * (e.g. "block - LMFS_MAX_PREFETCH <= base_block") underflowed for
	 * block numbers below LMFS_MAX_PREFETCH, wrongly excluding blocks
	 * near the start of the device from the prefetch window.
	 */
	if (block < base_block && base_block - block <= LMFS_MAX_PREFETCH) {
		bit = base_block - block - 1;
		assert(bit < LMFS_MAX_PREFETCH);
		SET_BIT(blocks_before, bit);
	} else if (block > base_block &&
	    block - base_block <= LMFS_MAX_PREFETCH) {
		bit = block - base_block - 1;
		assert(bit < LMFS_MAX_PREFETCH);
		SET_BIT(blocks_after, bit);
	}
  }

  /* Count how many blocks from the set are contiguously adjacent to the base
   * block on either side.
   */
  for (nr_before = 0; nr_before < LMFS_MAX_PREFETCH; nr_before++)
	if (!GET_BIT(blocks_before, nr_before))
		break;

  for (nr_after = 0; nr_after < LMFS_MAX_PREFETCH; nr_after++)
	if (!GET_BIT(blocks_after, nr_after))
		break;

  /* The number of blocks to prefetch is the minimum of two factors: the number
   * of blocks in the range around the base block, and the maximum number of
   * blocks that should be read ahead at once at all.
   */
  span = nr_before + 1 + nr_after;
  limit = lmfs_readahead_limit();

  nr_blocks = MIN(span, limit);
  assert(nr_blocks >= 1 && nr_blocks <= LMFS_MAX_PREFETCH);

  /* Start prefetching from the lowest block within the contiguous range, but
   * make sure that we read at least the original base block itself, too.
   */
  base_block -= MIN(nr_before, nr_blocks - 1);

  lmfs_readahead(dev, base_block, nr_blocks, fs_block_size);
}
#include <dirent.h> #include <assert.h> #include "file.h" #include "fproc.h" #include "dmap.h" #include <minix/vfsif.h> #include "vnode.h" #include "vmnt.h" #include "path.h" #include "param.h" /* Allow the root to be replaced before the first 'real' mount. */ static int have_root = 0; /* Bitmap of in-use "none" pseudo devices. */ static bitchunk_t nonedev[BITMAP_CHUNKS(NR_NONEDEVS)] = { 0 }; #define alloc_nonedev(dev) SET_BIT(nonedev, minor(dev) - 1) #define free_nonedev(dev) UNSET_BIT(nonedev, minor(dev) - 1) static dev_t name_to_dev(int allow_mountpt, char path[PATH_MAX]); static dev_t find_free_nonedev(void); static void update_bspec(dev_t dev, endpoint_t fs_e, int send_drv_e); /*===========================================================================* * update_bspec * *===========================================================================*/ static void update_bspec(dev_t dev, endpoint_t fs_e, int send_drv_e) { /* Update all block special files for a certain device, to use a new FS endpt * to route raw block I/O requests through.
#include <dirent.h> #include <assert.h> #include "file.h" #include "fproc.h" #include "dmap.h" #include <minix/vfsif.h> #include "vnode.h" #include "vmnt.h" #include "path.h" #include "param.h" /* Allow the root to be replaced before the first 'real' mount. */ PRIVATE int have_root = 0; /* Bitmap of in-use "none" pseudo devices. */ PRIVATE bitchunk_t nonedev[BITMAP_CHUNKS(NR_NONEDEVS)] = { 0 }; #define alloc_nonedev(dev) SET_BIT(nonedev, minor(dev) - 1) #define free_nonedev(dev) UNSET_BIT(nonedev, minor(dev) - 1) FORWARD _PROTOTYPE( dev_t name_to_dev, (int allow_mountpt, char path[PATH_MAX]) ); FORWARD _PROTOTYPE( dev_t find_free_nonedev, (void) ); FORWARD _PROTOTYPE( void update_bspec, (dev_t dev, endpoint_t fs_e, int send_drv_e) ); /*===========================================================================* * update_bspec * *===========================================================================*/ PRIVATE void update_bspec(dev_t dev, endpoint_t fs_e, int send_drv_e) {