/*
 * Exercise munlock(2) on a static, page-aligned, two-page buffer.
 * The happy paths (aligned and non-page-sized lengths) must succeed;
 * the error-path checks are compiled but disabled (see XXX below).
 */
static void test_munlock(void) {
    static char page[2 * PAGE_SIZE_4K] ALIGN(PAGE_SIZE_4K);

    /* Standard munlock on a page-aligned region. */
    int r = munlock(page, sizeof page);
    assert(r == 0);

    /* Munlock on a non-page-sized region. */
    r = munlock(page, sizeof page - 42);
    assert(r == 0);

    /* XXX: As with the madvise tests, we need to disable these to avoid touching TLS. */
    if (0) {
        /* Confirm an invalid addr is detected. */
        r = munlock((void*)page + 42, sizeof page - 42);
        assert(r == -1);
        assert(errno == EINVAL);

        /* Confirm an invalid length is detected. */
        r = munlock(page, SIZE_MAX);
        assert(r == -1);
        assert(errno == ENOMEM);

        /* Confirm an unmapped region is detected: one page below the
           (page-aligned) start of the executable image. */
        void *BAD_ADDR = (void*)(PAGE_ALIGN_4K((uintptr_t)__executable_start) - PAGE_SIZE_4K);
        r = munlock(BAD_ADDR, PAGE_SIZE_4K);
        assert(r == -1);
        assert(errno == ENOMEM);
    }
}
/*
 * Send a datagram on a V4V file descriptor via the V4VIOCSEND ioctl.
 *
 * fd        - V4V device file descriptor
 * buf/len   - payload to transmit
 * flags     - send flags, passed through to the driver
 * dest_addr - destination address, may be NULL
 *
 * Returns the ioctl result (bytes sent, or -1 on error).
 */
ssize_t
v4v_sendto (int fd, const void *buf, size_t len, int flags,
            v4v_addr_t * dest_addr)
{
  struct v4v_dev op;
  ssize_t result;

  op.addr = dest_addr;
  op.flags = flags;
  op.len = len;
  op.buf = (void *) buf;

#ifdef I_AM_A_BROKEN_WEENIE
  /* Pin the payload (and the destination address, if any) so the
     driver can touch the pages without faulting. */
  mlock (op.buf, op.len);
  if (op.addr)
    mlock (op.addr, sizeof (v4v_addr_t));
#endif

  result = v4v_ioctl (fd, V4VIOCSEND, &op);

#ifdef I_AM_A_BROKEN_WEENIE
  /* Unpin in reverse order of locking. */
  if (op.addr)
    munlock (op.addr, sizeof (v4v_addr_t));
  munlock (op.buf, op.len);
#endif

  return result;
}
/*
 * Open an in-memory music resource and select `track` for playback.
 * Returns 0 on success, -1 if `track` is out of range.
 *
 * NOTE: mlock()/munlock() here are project-local mutex primitives
 * (they take no arguments) protecting `playa`, NOT the POSIX
 * memory-pinning calls.
 */
int audio_open_music_resource(const unsigned char* data, size_t data_size, int track) {
	mlock();
	if(playa.thread_music_status != TS_WAITING) {
		/* Ask the playback thread to stop, then poll (dropping the
		   lock and sleeping 1 ms between checks) until it reports
		   TS_WAITING again. */
		playa.thread_music_status = TS_STOPPING;
		munlock();
		int done = 0;
		do {
			mlock();
			if(playa.thread_music_status == TS_WAITING)
				done = 1;
			munlock();
			if(!done)
				msleep(1);
		} while(!done);
		mlock();
	}
	playa.empty_track_active = 0;
	munlock();
	/* NOTE(review): everything below runs without holding the lock —
	   presumably safe because the thread is parked in TS_WAITING;
	   confirm no other writer touches `playa` here. */
	if(track == -1) { /* "empty" track */
		playa.empty_track_active = 1;
		memset(playa.wave_buffer, 0, sizeof(playa.wave_buffer));
		return 0;
	}
	ByteArray_open_mem(&playa.music_stream, (void*) data, data_size);
	CorePlayer_load(&playa.player.core, &playa.music_stream);
	assert(playa.player.core.version);
	if(track > playa.player.core.lastSong)
		return -1;
	playa.player.core.playSong = track;
	playa.player.core.initialize(&playa.player.core);
	return 0;
}
/*
 * Verify that RLIMIT_MEMLOCK is enforced: after a child process sets
 * the limit to zero, mlock() of a single page must fail.
 */
ATF_TC_BODY(setrlimit_memlock, tc)
{
	struct rlimit res;
	void *buf;
	long page;
	pid_t pid;
	int sta;

#ifdef __FreeBSD__
	/* Set max_wired really really high to avoid EAGAIN */
	set_vm_max_wired(INT_MAX);
#endif

	page = sysconf(_SC_PAGESIZE);
	ATF_REQUIRE(page >= 0);

	/*
	 * Allocate and validate the buffer BEFORE forking.  The original
	 * code forked first and checked afterwards, so on a failed
	 * malloc() the child would still run and call mlock(NULL, ...).
	 */
	buf = malloc(page);
	if (buf == NULL)
		atf_tc_fail("initialization failed");

	pid = fork();
	if (pid < 0) {
		free(buf);
		atf_tc_fail("initialization failed");
	}

	if (pid == 0) {

		/* Sanity: lock and unlock while the limit is untouched. */
		if (mlock(buf, page) != 0)
			_exit(EXIT_FAILURE);

		if (munlock(buf, page) != 0)
			_exit(EXIT_FAILURE);

		res.rlim_cur = 0;
		res.rlim_max = 0;

		if (setrlimit(RLIMIT_MEMLOCK, &res) != 0)
			_exit(EXIT_FAILURE);

		/*
		 * Try to lock a page while RLIMIT_MEMLOCK is zero;
		 * the test passes only if this fails.
		 */
		if (mlock(buf, page) != 0)
			_exit(EXIT_SUCCESS);

		(void)munlock(buf, page);
		_exit(EXIT_FAILURE);
	}

	free(buf);
	(void)wait(&sta);

	if (WIFEXITED(sta) == 0 || WEXITSTATUS(sta) != EXIT_SUCCESS)
		atf_tc_fail("RLIMIT_MEMLOCK not enforced");
}
/*
 * Exercise mlock(2)/munlock(2) error reporting: NULL and wild
 * pointers, plus a (probably) unmapped page just past the current brk.
 * NOTE: `page` is a file-scope variable defined outside this chunk.
 */
ATF_TC_BODY(mlock_err, tc)
{
#ifdef __NetBSD__
	unsigned long vmin = 0;
	size_t len = sizeof(vmin);
#endif
	void *invalid_ptr;
	int null_errno = ENOMEM; /* error expected for NULL */

#ifdef __FreeBSD__
#ifdef VM_MIN_ADDRESS
	if ((uintptr_t)VM_MIN_ADDRESS > 0)
		null_errno = EINVAL; /* NULL is not inside user VM */
#endif
	/* Set max_wired really really high to avoid EAGAIN */
	set_vm_max_wired(INT_MAX);
#else
	/* Ask the kernel where user VM starts so we know which errno a
	   NULL pointer should produce on this system. */
	if (sysctlbyname("vm.minaddress", &vmin, &len, NULL, 0) != 0)
		atf_tc_fail("failed to read vm.minaddress");

	if (vmin > 0)
		null_errno = EINVAL; /* NULL is not inside user VM */
#endif

	errno = 0;
	ATF_REQUIRE_ERRNO(null_errno, mlock(NULL, page) == -1);

	errno = 0;
	ATF_REQUIRE_ERRNO(null_errno, mlock((char *)0, page) == -1);

	errno = 0;
	ATF_REQUIRE_ERRNO(EINVAL, mlock((char *)-1, page) == -1);

	errno = 0;
	ATF_REQUIRE_ERRNO(null_errno, munlock(NULL, page) == -1);

	errno = 0;
	ATF_REQUIRE_ERRNO(null_errno, munlock((char *)0, page) == -1);

	errno = 0;
	ATF_REQUIRE_ERRNO(EINVAL, munlock((char *)-1, page) == -1);

	/*
	 * Try to create a pointer to an unmapped page - first after current
	 * brk will likely do.
	 */
	invalid_ptr = (void*)(((uintptr_t)sbrk(0)+page) & ~(page-1));
	printf("testing with (hopefully) invalid pointer %p\n", invalid_ptr);

	errno = 0;
	ATF_REQUIRE_ERRNO(ENOMEM, mlock(invalid_ptr, page) == -1);

	errno = 0;
	ATF_REQUIRE_ERRNO(ENOMEM, munlock(invalid_ptr, page) == -1);
}
// return -1: when track is finished, 0 if something was played, 1 if nothing was played. int audio_process(void) { mlock(); if(playa.thread_music_status == TS_DONE) { munlock(); return -1; } else if (playa.thread_music_status == TS_WAITING) { playa.thread_music_status = TS_PLAYING; } munlock(); return 0; }
/*
 * Frees memory allocated to hog but doesn't free it
 * back to the system.
 */
void hog_free(void)
{
	/* Drop the page pin before freeing; munlock errors are ignored. */
	munlock(Hog, Numbytes);
	free(Hog);

	/* Reset the globals so a subsequent hog run starts clean. */
	Hog = NULL;
	Numbytes = 0;
}
// Return a disk buffer to the pool.  `l` is the caller-held pool lock,
// forwarded to is_disk_buffer() for the sanity check.
void disk_buffer_pool::free_buffer_impl(char* buf, mutex::scoped_lock& l)
{
	TORRENT_ASSERT(buf);
	TORRENT_ASSERT(m_magic == 0x1337); // sanity check on object state
	TORRENT_ASSERT(is_disk_buffer(buf, l));

#if defined TORRENT_DISK_STATS || defined TORRENT_STATS
	--m_allocations;
#endif
#ifdef TORRENT_DISK_STATS
	// Decrement and log the per-category count, then drop this
	// buffer's category bookkeeping entry.
	TORRENT_ASSERT(m_categories.find(m_buf_to_category[buf])
		!= m_categories.end());
	std::string const& category = m_buf_to_category[buf];
	--m_categories[category];
	m_log << log_time() << " " << category << ": " << m_categories[category] << "\n";
	m_buf_to_category.erase(buf);
#endif
#if TORRENT_USE_MLOCK
	if (m_settings.lock_disk_cache)
	{
		// Undo the page pin taken when the buffer was allocated.
#ifdef TORRENT_WINDOWS
		VirtualUnlock(buf, m_block_size);
#else
		munlock(buf, m_block_size);
#endif
	}
#endif
#ifdef TORRENT_DISABLE_POOL_ALLOCATOR
	page_aligned_allocator::free(buf);
#else
	m_pool.free(buf);
#endif
	--m_in_use;
}
/*
 * LTP-style loop: repeatedly mmap the test file (fd/file_len are
 * file-scope, set up by setup()), lock it, unlock it, and unmap it.
 * Any failure aborts the test via tst_brkm().
 */
int main(void)
{
	char *buf;
	int lc;

	setup();

	for (lc = 0; TEST_LOOPING(lc); lc++) {
		buf = mmap(NULL, file_len, PROT_WRITE, MAP_SHARED, fd, 0);
		if (buf == MAP_FAILED)
			tst_brkm(TBROK | TERRNO, cleanup, "mmap");
		if (mlock(buf, file_len) == -1)
			tst_brkm(TBROK | TERRNO, cleanup, "mlock");
		/* NOTE(review): "%d" assumes file_len is an int — confirm
		   its declared type matches. */
		tst_resm(TINFO, "locked %d bytes from %p", file_len, buf);
		if (munlock(buf, file_len) == -1)
			tst_brkm(TBROK | TERRNO, cleanup, "munlock");
		if (munmap(buf, file_len) == -1)
			tst_brkm(TBROK | TERRNO, cleanup, "munmap");
	}

	tst_resm(TPASS, "test succeeded.");
	cleanup();
	tst_exit();
}
/*
 * Map a single huge page with the given mmap flags, then lock, unlock
 * and unmap it.  Any step that fails aborts the test via FAIL().
 */
static void test_simple_mlock(int flags)
{
	int fd = hugetlbfs_unlinked_fd();
	void *region;
	int rc;
	long hpage_size = check_hugepagesize();

	region = mmap(0, hpage_size, PROT_READ|PROT_WRITE, flags, fd, 0);
	if (region == MAP_FAILED)
		FAIL("mmap() failed (flags=%x): %s", flags, strerror(errno));

	rc = mlock(region, hpage_size);
	if (rc)
		FAIL("mlock() failed (flags=%x): %s", flags, strerror(errno));

	rc = munlock(region, hpage_size);
	if (rc)
		FAIL("munlock() failed (flags=%x): %s", flags, strerror(errno));

	rc = munmap(region, hpage_size);
	if (rc)
		FAIL("munmap() failed (flags=%x): %s", flags, strerror(errno));

	close(fd);
}
/*
 * Tear down both ringbuffers: free the JACK buffers, unpin the
 * structure from physical memory, close any open output file, then
 * free the structure itself.  Always returns 0.
 */
static int deinit_ringbuffers()
{
  int i, j;

  for (i = 0; i < 2; i++) {
    if (!ringbuffers[i])
      continue;

    for (j = 0; j < 2; j++) {
      if (ringbuffers[i]->buffer[j]) {
        jack_ringbuffer_free(ringbuffers[i]->buffer[j]);
      }
    }

    if (munlock(ringbuffers[i], sizeof(rotter_ringbuffer_t))) {
      rotter_error("Failed to unlock ringbuffer %c from physical memory.", ringbuffers[i]->label);
    }

    if (ringbuffers[i]->file_handle) {
      rotter_close_file(ringbuffers[i]);
      ringbuffers[i]->file_handle = NULL;
    }

    /* NOTE(review): ringbuffers[i] is not set to NULL after free —
       confirm nothing dereferences it after this call. */
    free(ringbuffers[i]);
  }

  return 0;
}
/*
 * Unlock a previously mlock()ed address range.
 *
 * addr/length - range to unpin (length cast down to size_t)
 *
 * Returns os_resultSuccess on success, os_resultFail otherwise
 * (reporting EPERM and ENOMEM cases via OS_REPORT).
 *
 * Fixes: the OS_REPORT calls previously identified the function as
 * "os_procMLock" (copy-paste from the lock counterpart), and the
 * ENOMEM text described mlock's RLIMIT_MEMLOCK case rather than
 * munlock's "range not mapped" case.
 */
os_result
os_procMUnlock(
    const void *addr,
    os_address length)
{
    int r;
    os_result result;

    r = (int)munlock(addr, (size_t)length);
    if (r == 0) {
        result = os_resultSuccess;
    } else {
        if (errno == EPERM) {
            OS_REPORT(OS_ERROR, "os_procMUnlock", 0,
                      "Current process has insufficient privilege");
        } else if (errno == ENOMEM) {
            /* For munlock, ENOMEM means part of the range is not mapped. */
            OS_REPORT(OS_ERROR, "os_procMUnlock", 0,
                      "Part of the specified address range is not mapped");
        }
        result = os_resultFail;
    }
    return result;
}
/*
 * Verify that mlock(2) requests larger than RLIMIT_MEMLOCK fail with
 * the documented errno.  The limit is walked downward in a forked
 * child so the shrinking rlimit cannot affect the parent.
 * NOTE: `page` is a file-scope variable defined outside this chunk.
 */
ATF_TC_BODY(mlock_limits, tc)
{
	struct rlimit res;
	void *buf;
	pid_t pid;
	int sta;

	buf = malloc(page);
	ATF_REQUIRE(buf != NULL);

	pid = fork();
	ATF_REQUIRE(pid >= 0);

	if (pid == 0) {

		/* Always try to lock one byte more than the limit allows. */
		for (ssize_t i = page; i >= 2; i -= 100) {

			res.rlim_cur = i - 1;
			res.rlim_max = i - 1;

			(void)fprintf(stderr, "trying to lock %zd bytes "
			    "with %zu byte limit\n", i, (size_t)res.rlim_cur);

			if (setrlimit(RLIMIT_MEMLOCK, &res) != 0)
				_exit(EXIT_FAILURE);

			errno = 0;

#ifdef __FreeBSD__
			/*
			 * NetBSD doesn't conform to POSIX with ENOMEM requirement;
			 * FreeBSD does.
			 *
			 * See: NetBSD PR # kern/48962 for more details.
			 */
			if (mlock(buf, i) != -1 || errno != ENOMEM) {
#else
			if (mlock(buf, i) != -1 || errno != EAGAIN) {
#endif
				/* Lock unexpectedly succeeded (or wrong errno):
				   undo it and report failure. */
				(void)munlock(buf, i);
				_exit(EXIT_FAILURE);
			}
		}

		_exit(EXIT_SUCCESS);
	}

	(void)wait(&sta);

	if (WIFEXITED(sta) == 0 || WEXITSTATUS(sta) != EXIT_SUCCESS)
		atf_tc_fail("mlock(2) locked beyond system limits");

	free(buf);
}

ATF_TC(mlock_mmap);
ATF_TC_HEAD(mlock_mmap, tc)
{
	atf_tc_set_md_var(tc, "descr", "Test mlock(2)-mmap(2) interaction");
}
/*
 * Walk /proc/self/maps and alternately mlock()/munlock() every mapped
 * region (even iterations lock, odd ones unlock), then check that the
 * [stack] region did not shrink across the run.
 */
int main(int argc, char *argv[])
{
	int lc;
	char *msg;
	long from, to;
	long first = -1, last = -1;
	char b[KB];
	FILE *fp;

	if ((msg = parse_opts(argc, argv, NULL, NULL)) != NULL)
		tst_brkm(TBROK, NULL, "OPTION PARSING ERROR - %s", msg);

	setup();

	for (lc = 0; TEST_LOOPING(lc); lc++) {
		fp = fopen("/proc/self/maps", "r");
		if (fp == NULL)
			tst_brkm(TBROK|TERRNO, cleanup, "fopen");

		while (!feof(fp)) {
			if (!fgets(b, KB - 1, fp))
				break;
			/* Strip the trailing newline.
			   NOTE(review): this indexes b[-1] if fgets() ever
			   returns an empty string — confirm that cannot
			   happen for this file. */
			b[strlen(b) - 1] = '\0';

			/* Each maps line begins "start-end ...". */
			sscanf(b, "%lx-%lx", &from, &to);

			/* Record the initial stack size. */
			if (lc == 0 && strstr(b, "[stack]") != NULL)
				first = (to - from)/KB;

			switch (lc & 1) {
			case 0:
				if (mlock((const void*)from, to-from) == -1)
					tst_resm(TINFO|TERRNO, "mlock failed");
				break;
			case 1:
				if (munlock((void*)from, to - from) == -1)
					tst_resm(TINFO|TERRNO, "munlock failed");
				break;
			default:
				break;
			}

			tst_resm(TINFO, "%s from %lx to %0lx",
			    (lc&1) ? "munlock" : "mlock", from, to);

			/* Record the final stack size. */
			if (strstr(b, "[stack]") != NULL)
				last = (to - from) / KB;
		}
		fclose(fp);
	}

	tst_resm(TINFO, "starting stack size is %ld", first);
	tst_resm(TINFO, "final stack size is %ld", last);

	if (last < first)
		tst_resm(TFAIL, "stack size is decreased.");
	else
		tst_resm(TPASS, "stack size is not decreased.");

	cleanup();
	tst_exit();
}
/*
 * Tear down an IPC context: remove the mup entry, unpin and unmap the
 * shared region, then close/unlink the semaphores and per-pid shared
 * memory objects.
 *
 * Fixes: the unmap-failure message said "shmdt returned non-zero"
 * although the failing call is munmap(), not the SysV shmdt().
 */
void ipc_destroy(ipc_t *ctx)
{
	char file_name[64];

	mup_remove(ctx);

	/* Unpin before unmapping; munlock errors are ignored. */
	munlock(ctx->mup,sizeof(mup_t));
	if(munmap(ctx->mup,sizeof(mup_t))!=0) {
		fprintf(stderr,"munmap returned non-zero.\n");
		exit(EX_DATAERR);
	} else {
		sprintf(file_name,"%s",ctx->ipc_name);
		// shm_unlink(file_name);
		// close(ctx->mup_shm_fd);
	}

	sem_close(ctx->mup_sem);

	/* Unlink the shared semaphore and the per-pid objects. */
	sprintf(file_name,"%s",ctx->ipc_name);
	sem_unlink(file_name);
	// shm_unlink(file_name);
	sprintf(file_name,"%s.%d",ctx->ipc_name,getpid());
	sem_unlink(file_name);
	sprintf(file_name,"%s.shm.%d",ctx->ipc_name,getpid());
	shm_unlink(file_name);
}
/** * Just free the memory region, the file will be closed on program exit. */ void tdb_htrie_pure_close(void *addr, size_t size, int fd) { munlock(addr, size); munmap(addr, size); close(fd); }
// Release a locked allocation: wipe its contents, unpin the pages and
// return the mapping to the OS.  The order matters — the memory must
// be cleansed while it is still mapped and pinned.
void PosixLockedPageAllocator::FreeLocked(void* addr, size_t len)
{
    // Round up to whole pages, matching the granularity of the
    // original mapping/lock.
    len = align_up(len, page_size);
    // Scrub secrets before the pages can be reused or swapped.
    memory_cleanse(addr, len);
    munlock(addr, len);
    munmap(addr, len);
}
/*
 * Hand the page contents in `buffer` to the hypervisor for the paged
 * gfn via XENMEM_paging_op_prep.  Returns the memop result, or -1
 * with errno = EINVAL for a NULL or misaligned buffer.
 */
int xc_mem_paging_load(xc_interface *xch, domid_t domain_id,
                       uint64_t gfn, void *buffer)
{
    int rc, old_errno;

    /* Default errno for the parameter checks below. */
    errno = EINVAL;

    if ( !buffer )
        return -1;

    /* The buffer must be page-aligned for the hypercall. */
    if ( ((unsigned long) buffer) & (XC_PAGE_SIZE - 1) )
        return -1;

    /* Pin the buffer so it cannot be paged out during the memop. */
    if ( mlock(buffer, XC_PAGE_SIZE) )
        return -1;

    rc = xc_mem_paging_memop(xch, domain_id,
                             XENMEM_paging_op_prep,
                             gfn, buffer);

    /* Preserve the memop's errno across the munlock() cleanup so the
       caller sees the real failure cause. */
    old_errno = errno;
    munlock(buffer, XC_PAGE_SIZE);
    errno = old_errno;

    return rc;
}
/*
 * Replace the channel's audio buffer with one of `size` frames,
 * silencing it, and (when USE_MLOCK is enabled) keeping the buffer
 * pinned in physical memory across the reallocation.
 *
 * Fixes: the munlock-failure message was a copy-paste of the mlock
 * one ("Couldn't lock buffer into memory") — it now reports the
 * unlock failure correctly.
 */
void AudioChannel::set_buffer_size( nframes_t size )
{
#ifdef USE_MLOCK
	// Unpin the old buffer before it is freed.
	if (mlocked) {
		if (munlock (m_buffer, m_bufferSize) == -1) {
			PERROR("Couldn't unlock buffer from memory");
		}
		mlocked = 0;
	}
#endif /* USE_MLOCK */

	if (m_buffer) {
		delete [] m_buffer;
	}

	m_buffer = new audio_sample_t[size];
	m_bufferSize = size;
	silence_buffer(size);

#ifdef USE_MLOCK
	// Pin the new buffer so playback never hits a page fault.
	if (mlock (m_buffer, size) == -1) {
		PERROR("Couldn't lock buffer into memory");
	}
	mlocked = 1;
#endif /* USE_MLOCK */
}
// Initialize the interface implementation // Return: // true if it has succeeded, false if it has failed bool GCToOSInterface::Initialize() { // Calculate and cache the number of processors on this machine int cpuCount = sysconf(SYSCONF_GET_NUMPROCS); if (cpuCount == -1) { return false; } g_logicalCpuCount = cpuCount; // Verify that the s_helperPage is really aligned to the g_SystemInfo.dwPageSize assert((((size_t)g_helperPage) & (OS_PAGE_SIZE - 1)) == 0); // Locking the page ensures that it stays in memory during the two mprotect // calls in the FlushProcessWriteBuffers below. If the page was unmapped between // those calls, they would not have the expected effect of generating IPI. int status = mlock(g_helperPage, OS_PAGE_SIZE); if (status != 0) { return false; } status = pthread_mutex_init(&g_flushProcessWriteBuffersMutex, NULL); if (status != 0) { munlock(g_helperPage, OS_PAGE_SIZE); return false; } return true; }
/*
 * A cache is kind of pointless if it is swappable, right? Let's give
 * applications the ability to pin the cache memory. This is a separate
 * call from io_init_cache() because non-privileged users can't do it, and
 * they still want to create small caches.
 */
errcode_t io_mlock_cache(io_channel *channel)
{
	struct io_cache *ic = channel->io_cache;
	long pages_wanted, avpages;
	int rc;

	if (!ic)
		return OCFS2_ET_INVALID_ARGUMENT;
	if (ic->ic_locked)
		return 0;

	/*
	 * We're going to lock our cache pages. We don't want to
	 * request more memory than the system has, though.
	 */
	pages_wanted = channel->io_blksize * ic->ic_nr_blocks / getpagesize();
	avpages = sysconf(_SC_AVPHYS_PAGES);
	if (pages_wanted > avpages)
		return OCFS2_ET_NO_MEMORY;

	rc = mlock(ic->ic_data_buffer, ic->ic_data_buffer_len);
	if (rc)
		return OCFS2_ET_NO_MEMORY;

	rc = mlock(ic->ic_metadata_buffer, ic->ic_metadata_buffer_len);
	if (rc) {
		/* Roll back the data-buffer lock so we fail cleanly. */
		munlock(ic->ic_data_buffer, ic->ic_data_buffer_len);
		return OCFS2_ET_NO_MEMORY;
	}

	ic->ic_locked = 1;
	return 0;
}
// Shutdown the interface implementation void GCToOSInterface::Shutdown() { int ret = munlock(g_helperPage, OS_PAGE_SIZE); assert(ret == 0); ret = pthread_mutex_destroy(&g_flushProcessWriteBuffersMutex); assert(ret == 0); }
/*
 * Managed wrapper for munlock(2).  Rejects lengths that would
 * overflow the native size_t (via mph_return_if_size_t_overflow),
 * then forwards to the system call.
 */
int
Mono_Posix_Syscall_munlock (void *start, mph_size_t len)
{
	mph_return_if_size_t_overflow (len);

	const size_t native_len = (size_t) len;
	return munlock (start, native_len);
}
/*
 * Scrub and release a sensitive buffer of `sz` bytes.
 *
 * Fixes: a NULL pointer is now a no-op (mirroring free(NULL));
 * previously memset(NULL, 0, sz) invoked undefined behavior.
 */
void SDMCKT_free(void *ptr, size_t sz) {
    if (ptr == NULL)
        return;

    /* Best-effort wipe of the secret contents.
     * NOTE(review): a plain memset immediately before free() may be
     * optimized away — consider explicit_bzero()/memset_s where
     * available. */
    memset(ptr, 0, sz);
#ifdef WORKING_MLOCK
    /* Undo the pin taken at allocation time (errors ignored). */
    munlock(ptr, sz);
#endif
    free(ptr);
}
// Undo a prior page lock on [addr, addr+len).
// Returns true when the platform unlock call succeeds.
bool MemoryPageLocker::Unlock(const void* addr, size_t len)
{
#ifdef WIN32
    return VirtualUnlock(const_cast<void*>(addr), len) != 0;
#else
    const int rc = munlock(addr, len);
    return rc == 0;
#endif
}
/**
 * @brief Unpin the memory that is backing the pool from RAM.
 * @param pool The memory pool to unpin.
 * @return 0 on success, -1 (MEMPOOL_FAILURE) on failure.
 */
int lpx_mempool_unpin_variable_pool(lpx_mempool_variable_t *pool)
{
    /* Guard against a missing pool before touching its fields. */
    if (UNLIKELY(pool == NULL))
        return MEMPOOL_FAILURE;

    return munlock(pool->pool, pool->poolSize);
}
/*
 * Unpin and free a buffer previously locked with mlock().
 * Returns OK on success, BADFREE if munlock() fails.
 * NOTE(review): when munlock() fails the buffer is NOT freed — the
 * caller must retry or accept the leak; confirm this is intentional.
 */
static int Lfree(unsigned char *mem,int size){
	int r;

	/* munlock() returns 0 on success, -1 on error. */
	r=munlock((void*)mem,size);
	if(r<0)
		return BADFREE;

	free(mem);
	return OK;
}
// Destructor: tear down the mapped sample file — unpin (if it was
// pinned), unmap, and close the underlying descriptor.
SamplesInput::bin1::~bin1()
{
	// Only unlock when --mlock was requested.
	// NOTE(review): assumes mlockArg.getValue() returns the same
	// value as when the mapping was (presumably) locked at
	// construction — confirm the flag cannot change mid-run.
	if ( mlockArg.getValue() ) {
		munlock ( fileoffset, RealFileSize );
	}
	munmap ( fileoffset, RealFileSize );
	close ( inputfd );
}
/* Release the mlock() pin on a memory block header, but only if the
   block was flagged as locked when it was allocated */
static void unlockMemory( INOUT MEM_INFO_HEADER *memHdrPtr )
	{
	assert( isWritePtr( memHdrPtr, sizeof( MEM_INFO_HEADER ) ) );

	/* Nothing to do unless the memory was locked */
	if( !( memHdrPtr->flags & MEM_FLAG_LOCKED ) )
		return;

	munlock( ( void * ) memHdrPtr, memHdrPtr->size );
	}
/*
 * Wipe, unpin and free a sensitive buffer.  A NULL pointer is a
 * harmless no-op.
 */
void SecureDestroy(char *Mem, int Size)
{
    if (Mem == NULL)
        return;

    /* Scrub the contents before releasing the pages. */
    SecureClearMem(Mem, Size);
    munlock(Mem, Size);
    free(Mem);
}