Example #1
File: dbmem.c  Project: cfrantz/pongo
// Flush the memory-mapped database file back to disk via mm_sync().
void dbfile_sync(pgctx_t *ctx)
{
	//uint32_t t0, t1;
	//t0 = utime_now();
	mm_sync(&ctx->mm);
	//t1 = utime_now();
	//log_debug("sync took %dus", t1-t0);
}
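A minimal usage sketch for the snippet above, assuming a pgctx_t handle is obtained elsewhere; the get_context() helper is hypothetical and not part of pongo's API:

// Hypothetical helper: stands in for however the application obtains its pgctx_t.
extern pgctx_t *get_context(void);

void checkpoint(void)
{
	pgctx_t *ctx = get_context();
	if (ctx != NULL) {
		// ... mutate the database ...
		dbfile_sync(ctx);	// flush the context's memory map via mm_sync()
	}
}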
Example #2
// Tear down every mm_map in the map_set: release rlimit accounting,
// sync modified file-backed pages, drop page-table entries and memory
// references, and free backing physical memory that is no longer referenced.
int
ms_unmap(ADDRESS *adp, struct map_set *ms, int unmap_flags) {
	struct mm_map			*mm;
	struct mm_map			*end_mm;
	struct mm_object_ref	*or;
	struct unmap_data		data;
	OBJECT					*obp;
	int						haslock;
	int						has_unmapped = 0;

	CRASHCHECK(adp == NULL);
	data.prp =  object_from_data(adp, address_cookie);
	
	proc_lock_owner_mark(data.prp);

	if(!(unmap_flags & ~(UNMAP_PRIVATIZE|UNMAP_NORLIMIT))) {
		unmap_flags |= munmap_flags_default;
	}
	data.unmap_flags = unmap_flags;
	end_mm = ms->last->next;
	for(mm = ms->first; mm != end_mm; mm = mm->next) {
		if(mm->extra_flags & EXTRA_FLAG_SPECIAL) {
			if(!(data.prp->flags & (_NTO_PF_LOADING|_NTO_PF_TERMING))) {
				ms->flags |= MI_SKIP_SPECIAL;
				continue;
			}
		}
		has_unmapped = 1;
		if(!(data.unmap_flags & UNMAP_NORLIMIT)) {
			size_t		size;

			size = (mm->end - mm->start) + 1;
			adp->rlimit.vmem -= size;
			if (mm->extra_flags & EXTRA_FLAG_RLIMIT_DATA) {
				adp->rlimit.data -= size;
			}
			if(mm->extra_flags & EXTRA_FLAG_LOCK) {
				//RUSH1: BUG: This is no good - we need to call a function in 
				//RUSH1: vmm_munlock.c to actually unlock the range for the mm_map.
				adp->rlimit.memlock -= size;
			}
		}
		or = mm->obj_ref;
		if(or != NULL) {
			obp = or->obp;
			haslock = memobj_cond_lock(obp);

			//FUTURE: If we start turning on MM_MEM_HAS_SYNC
			//FUTURE: in vmm_vaddr_to_memobj(), we can check
			//FUTURE: it here and avoid the individual page checks

			//FUTURE: If the map region has never had write permissions,
			//FUTURE: we can skip this check, since we obviously can't
			//FUTURE: own a mutex: can't just check PROT_WRITE, since
			//FUTURE: we might have gotten the mutex and then turned off
			//FUTURE: PROT_WRITE with mprotect() - silly, but possible.
			//FUTURE: The loader threads will need to say that even though
			//FUTURE: they had PROT_WRITE on for a while, it doesn't count
			//FUTURE: against this check.
			//FUTURE: Since clean_pmem is now doing more than just dealing
			//FUTURE: with the sync object, have to be more careful about
			//FUTURE: skipping the call.

			data.mm = mm;
			data.flags = 0;
			(void)memobj_pmem_walk_mm(MPW_PHYS|MPW_SYSRAM, mm, clean_pmem, &data);

			if((data.flags & PAQ_FLAG_MODIFIED) 
			 && !(mm->mmap_flags & MAP_NOSYNCFILE)
			 && (obp->hdr.type == OBJECT_MEM_FD)
			 && (obp->mem.mm.flags & MM_FDMEM_NEEDSSYNC)) {
				//RUSH1: What to do if mm_sync() returns non-EOK?
				(void)mm_sync(obp, mm, MS_ASYNC);
			}

			//RUSH3: The mm_sync() call is also going to walk the
			//RUSH3: memory reference list. Can we combine the two?
			data.start = mm->offset;
			data.end   = mm->offset + (mm->end - mm->start);
			data.check_limit = 100;
			//RUSH3: Make more object oriented
			switch(obp->hdr.type) {
			case OBJECT_MEM_ANON:	
				if(obp->mem.mm.flags & MM_ANMEM_MULTI_REFS) {
					// We can't bail out early or we'll leak anon memory.
					data.check_limit = ~0;
				} else {
					// We can skip check_last_ref() if it's an anonymous object
					// with no mapping of "/proc/<pid>/as".
					data.check_limit = 0;
				}
				break;
			case OBJECT_MEM_SHARED:	
				if(obp->hdr.refs != 0) {
					// Can't free any memory yet.
					data.check_limit = 0;
					data.start = data.end + 1;
				}
				break;
			case OBJECT_MEM_FD:
			//RUSH3: If we could tell that there were no open fd's,
				//RUSH3: we could start freeing memory here.
				data.check_limit = 0;
				data.start = data.end + 1;
				break;
			case OBJECT_MEM_TYPED:
				//RUSH3: Are there some conditions that will allow us
				//RUSH3: to skip the check? MAP_PRIVATE? non-allocable?
				// Can't bail out early or we'll leak memory
				data.check_limit = ~0;
				break;
			default: break;
			}
			if(data.check_limit != 0) {
				memref_walk(obp, check_last_ref, &data);
			}

			
#if defined(CPU_GBL_VADDR_START)
			//RUSH3: CPUism. ARM specific
			if(mm->extra_flags & EXTRA_FLAG_GBL_VADDR) {
				switch(obp->hdr.type) {
				case OBJECT_MEM_SHARED:
					// Tell CPU specific code this region is being unmapped
					cpu_gbl_unmap(adp, mm->start, mm->end, obp->mem.mm.flags);

					if (!(obp->mem.mm.flags & SHMCTL_GLOBAL)) {
						// Mapping is for this process only, so unmap it now
						// and release the global address range
						pte_unmap(adp, mm->start, mm->end, obp);
						(void)GBL_VADDR_UNMAP(mm->start, (mm->end - mm->start) + 1);
					} else if (data.start < data.end) {
						uintptr_t	va_start;
						uintptr_t	va_end;

						// Unmap only the range that is not referenced by
						// other processes
						// We can only release the global address range used
						// for the entire object when the object is destroyed.
						// The GBL_VADDR_UNMAP() is done in vmm_resize(). 
						va_start = mm->start + (uintptr_t)(data.start - mm->offset);
						va_end   = mm->start + (data.end - mm->offset);
						pte_unmap(adp, va_start, va_end, obp);
					}
					break;
				default: break;
				}
			} else
#endif
			{
				// If none of the quantums has the PAQ_FLAG_INITIALIZED
				// bit on (from clean_pmem), we know that we can't have set
				// up any page table entries, and can skip the unmapping
				if(data.flags & PAQ_FLAG_INITIALIZED) {
					pte_unmap(adp, mm->start, mm->end, obp);
				}
			}
			memref_del(mm);

			if(obp->hdr.type != OBJECT_MEM_ANON) {
				if(((mm->mmap_flags & (MAP_LAZY|MAP_TYPE)) == MAP_PRIVATE) 
					&& !(data.unmap_flags & UNMAP_PRIVATIZE)) {
					OBJECT		*anmem_obp;
					off64_t		anmem_off;
					size_t		anmem_size;
					int			anmem_lock;

					// For ~MAP_LAZY, MAP_PRIVATE, non-anonymous objects, we
					// allocated all the potential anon memory we'd need to
					// privatize the object in vmm_mmap() to avoid over-commitment
					// problems. We have to free that as well now.
					anmem_obp = adp->anon;
					anmem_size = (mm->end - mm->start) + 1;
					anmem_lock = memobj_cond_lock(anmem_obp);
					anmem_off = anmem_offset(anmem_obp, mm->start, anmem_size);
					memobj_pmem_del_len(anmem_obp, anmem_off, anmem_size);
					if(!anmem_lock) memobj_unlock(anmem_obp);
				}
			}
	
			if((data.flags & PAQ_FLAG_INUSE) && (data.start < data.end)) {
				memobj_pmem_del_len(obp, data.start, (size_t)(data.end - data.start) + 1);
			}

			if(!haslock) memobj_unlock(obp);
		}
	}
	if(has_unmapped) {
		map_remove(ms);
		map_destroy(ms);
	} else {
		map_coalese(ms);
	}

	return EOK;
}
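For orientation, ms_unmap() above looks like the kernel-side tear-down path behind the standard POSIX munmap() call: it walks each mm_map in the set, releases rlimit accounting, drops page-table entries, and frees backing memory. A minimal, self-contained user-space sketch of the corresponding POSIX calls (plain mmap()/munmap(), nothing project-specific):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("example.dat", O_RDWR | O_CREAT, 0644);
	if (fd == -1) { perror("open"); return 1; }
	if (ftruncate(fd, 4096) == -1) { perror("ftruncate"); close(fd); return 1; }

	// Map one page shared so stores reach the file.
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) { perror("mmap"); close(fd); return 1; }

	strcpy(p, "hello");

	// munmap() is the request a path like ms_unmap() ultimately services:
	// the mapping, its page-table entries, and its rlimit charge go away.
	if (munmap(p, 4096) == -1) perror("munmap");

	close(fd);
	return 0;
}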
Example #3
File: vmm_msync.c  Project: vocho/openqnx
// Handle an msync()-style request on [vaddr, vaddr + len): perform the cache
// operations for the isolated mappings, then write back (MS_SYNC/MS_ASYNC) or
// invalidate (MS_INVALIDATE) modified pages of file-backed objects.
int
vmm_msync(PROCESS *prp, uintptr_t vaddr, size_t len, int flags) {
    ADDRESS					*as;
    int						r;
    struct map_set			ms;
    struct mm_map			*mm;
    struct mm_object_ref	*or;
    OBJECT					*obp;
    struct cache_data		cache;

    /* MS_SYNC and MS_ASYNC are mutually exclusive */
    if ((flags & (MS_SYNC | MS_ASYNC)) == (MS_SYNC | MS_ASYNC))
        return EINVAL;

    as = prp->memory;
    r = map_isolate(&ms, &as->map, vaddr, len, MI_SPLIT);
    if(r != EOK) goto fail1;

    // We have to make sure there are extra threads available in case
    // CacheControl() puts this thread into WAITPAGE, or when we
    // go to write a page out.
    if(proc_thread_pool_reserve() != 0) {
        r = EAGAIN;
        goto fail2;
    }

    // CacheControl() might cause page faults, so let fault_pulse()
    // know that it doesn't have to grab the lock for this reference
    proc_lock_owner_mark(prp);


    if(flags & MS_ASYNC) {
        cache.flags = (flags & ~MS_ASYNC) | MS_SYNC;
    } else {
        cache.flags = flags;
    }
    cache.start = VA_INVALID;
    cache.as = as;
    for(mm = ms.first; mm != ms.last->next; mm = mm->next) {
        if(mm->mmap_flags & MAP_LAZY) {
            // We only want to do cache operations on memory that's been
            // allocated so that we avoid cache instructions 'touching'
            // non-existent pages and causing them to become allocated.
            or = mm->obj_ref;
            if(or != NULL) {
                unsigned has_lock;

                obp = or->obp;
                has_lock = memobj_cond_lock(obp);
                cache.mm = mm;
                (void)memobj_pmem_walk_mm(MPW_SYSRAM|MPW_PHYS, mm, check_cache, &cache);
                if(!has_lock) memobj_unlock(obp);
            }
        } else {
            // We could use memobj_pmem_walk_mm() for non-lazy regions
            // as well, but it's faster to do it in one gulp.
            one_cache(&cache, mm->start, mm->end + 1);
        }
    }
    if(cache.start != VA_INVALID) {
        CPU_CACHE_CONTROL(as, (void *)cache.start, cache.next - cache.start,
                          cache.flags);
    }

    if(!(flags & (MS_INVALIDATE_ICACHE|MS_CACHE_ONLY))) {
        for(mm = ms.first; mm != ms.last->next; mm = mm->next) {
            if((flags & MS_INVALIDATE) && (mm->extra_flags & EXTRA_FLAG_LOCK)) {
                r = EBUSY;
                goto fail3;
            }
            or = mm->obj_ref;
            if (or != NULL) {
                obp = or->obp;

                if ((flags & MS_INVALIDATE) ||
                        ((flags & (MS_SYNC | MS_ASYNC)) && !(mm->mmap_flags & MAP_NOSYNCFILE)))
                {
                    unsigned has_lock = memobj_cond_lock(obp);
                    if((obp->hdr.type == OBJECT_MEM_FD) &&
                            ((obp->mem.mm.flags & MM_FDMEM_NEEDSSYNC) || (flags & MS_INVALIDATE))) {
                        /*
                         * if the caller specified MS_INVALIDATE on a NOSYNCFILE mapping,
                         * skip the MS_SYNC/MS_ASYNC processing
                        */
                        int sync_flags = (mm->mmap_flags & MAP_NOSYNCFILE) ? flags & ~(MS_SYNC|MS_ASYNC) : flags;
                        r = mm_sync(obp, mm, sync_flags);
                    }
                    if(!has_lock) memobj_unlock(obp);
                    if(r != EOK) goto fail3;
                }
            }
        }
    }

fail3:
    proc_thread_pool_reserve_done();

fail2:
    map_coalese(&ms);

fail1:
    return r;
}
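vmm_msync() above implements the kernel side of an msync()-style request; note the EINVAL check when both MS_SYNC and MS_ASYNC are passed, and the MS_INVALIDATE handling for file-backed objects. A minimal user-space sketch of the corresponding POSIX call:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("log.dat", O_RDWR | O_CREAT, 0644);
	if (fd == -1) { perror("open"); return 1; }
	if (ftruncate(fd, 4096) == -1) { perror("ftruncate"); close(fd); return 1; }

	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) { perror("mmap"); close(fd); return 1; }

	strcpy(p, "record");

	// MS_SYNC blocks until the dirty page is written back; MS_ASYNC only
	// schedules the writeback. Passing both is rejected (the EINVAL case above).
	if (msync(p, 4096, MS_SYNC) == -1) perror("msync");

	munmap(p, 4096);
	close(fd);
	return 0;
}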