void slaveLoop(void *id) { int myid = *((int*)id); int i, j, ret; int local_sense = 0; int total_iter=0; long long int temp; char localbuf[1024]; unsigned int input_array[6]; int mybinding; unsigned long long c; emptyPage_t** pagesVector; tm_bind_to_cabinet(myid+1); Barrier(&local_sense, myid, num_threads); if (myid == num_threads-1) { set_handler_address(); set_log_base(myid); int k = 0; BEGIN_ESCAPE while (1) { while (!cleanus_counter); for (c=0; c<num_threads+1; c++) { memcntl(GlobalPagesVector[c], 8192, MC_SYNC, (caddr_t) (MS_SYNC|MS_INVALIDATE), 0, 0); } pthread_mutex_lock(&clean_mutex); cleanus_counter = 0; pthread_mutex_unlock(&clean_mutex); } END_ESCAPE } else {
int main(int argc, char *argv[]) { #if _lib_memcntl /* advise larger stack size */ struct memcntl_mha mha; mha.mha_cmd = MHA_MAPSIZE_STACK; mha.mha_flags = 0; mha.mha_pagesize = 64 * 1024; (void)memcntl(NULL, 0, MC_HAT_ADVISE, (caddr_t)&mha, 0, 0); #endif sh_waitnotify((Shnote_f)0); return(sh_main(argc, argv, (Shinit_f)0)); }
/* * shmat interpose */ void * shmat(int shmid, const void *shmaddr, int shmflag) { static caddr_t (*shmatfunc)() = NULL; void *result; int advice = -1; struct shmid_ds mds; #ifdef MADVDEBUG int rc; #else /* LINTED */ int rc; #endif if (!shmatfunc) { shmatfunc = (caddr_t (*)()) dlsym(RTLD_NEXT, "shmat"); assert(shmatfunc); } result = shmatfunc(shmid, shmaddr, shmflag); /* * Options ism, dism take precedence over option shm. */ if (advice_ism >= 0 && (shmflag & SHM_SHARE_MMU)) { advice = advice_ism; } else if (advice_dism >= 0 && (shmflag & SHM_PAGEABLE)) { advice = advice_dism; } else if (advice_shm >= 0) { advice = advice_shm; } /* * Apply advice if specified and shmat succeeded. */ if (advice >= 0 && result != (void *)-1) { /* First determine segment size */ rc = shmctl(shmid, IPC_STAT, &mds); MADVPRINT(4, (stderr, "shmctl rc %d errno %d\n", strerror(errno))); rc = memcntl(result, mds.shm_segsz, MC_ADVISE, (caddr_t)(intptr_t)advice, 0, 0); MADVPRINT(1, (stderr, "shmat advice: 0x%x 0x%x %d, rc %d errno %d\n", result, mds.shm_segsz, advice, rc, errno)); } return (result); }
/*
 * Request preferred page size `sz` for either the heap (flags ==
 * MPSSHEAP) or the stack, via MC_HAT_ADVISE.  Returns the memcntl()
 * result: 0 on success, -1 on failure.
 */
static int
pgszset(size_t sz, uint_t flags)
{
	struct memcntl_mha request;

	request.mha_cmd = (flags == MPSSHEAP) ?
	    MHA_MAPSIZE_BSSBRK : MHA_MAPSIZE_STACK;
	request.mha_pagesize = sz;
	request.mha_flags = 0;

	return (memcntl(NULL, 0, MC_HAT_ADVISE, (caddr_t)&request, 0, 0));
}
/*
 * mmap interpose: call the real mmap() (resolved lazily via RTLD_NEXT),
 * then apply any configured MC_ADVISE policy to the new mapping.
 */
caddr_t
mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos)
{
	static caddr_t (*realmmap)() = NULL;
	caddr_t mapping;
	int chosen = -1;
#ifdef MADVDEBUG
	int rc;
#else
	/* LINTED */
	int rc;
#endif

	if (!realmmap) {
		realmmap = (caddr_t (*)()) dlsym(RTLD_NEXT, "mmap");
		assert(realmmap);
	}

	mapping = realmmap(addr, len, prot, flags, fd, pos);

	/*
	 * Option mapanon has highest precedence while option map
	 * has lowest precedence.
	 */
	if (advice_mapanon >= 0 && (flags & MAP_ANON))
		chosen = advice_mapanon;
	else if (advice_mapshared >= 0 && (flags & MAP_SHARED))
		chosen = advice_mapshared;
	else if (advice_mapprivate >= 0 && (flags & MAP_PRIVATE))
		chosen = advice_mapprivate;
	else if (advice_map >= 0)
		chosen = advice_map;

	/*
	 * Apply advice if specified and mmap succeeded.
	 */
	if (chosen >= 0 && mapping != MAP_FAILED) {
		rc = memcntl(mapping, len, MC_ADVISE,
		    (caddr_t)(intptr_t)chosen, 0, 0);
		MADVPRINT(1, (stderr,
		    "mmap advice: 0x%x 0x%x %d, rc %d errno %d\n",
		    mapping, len, chosen, rc, errno));
	}
	return (mapping);
}
/*
 * On systems that support multiple page sizes we may reduce the
 * number of TLB misses by using the biggest available page size.
 * Returns 0 if large pages were enabled, -1 otherwise (a warning is
 * printed and the default page size remains in effect).
 */
static int
enable_large_pages(void)
{
#if defined(HAVE_GETPAGESIZES) && defined(HAVE_MEMCNTL)
    size_t sizes[32];
    int avail = getpagesizes(sizes, 32);
    int ret = -1;

    if (avail != -1) {
        struct memcntl_mha arg = {0};
        size_t max = sizes[0];
        int ii;

        /* Find the largest supported page size. */
        for (ii = 1; ii < avail; ++ii) {
            if (sizes[ii] > max) {
                max = sizes[ii];
            }
        }

        arg.mha_flags = 0;
        arg.mha_pagesize = max;
        arg.mha_cmd = MHA_MAPSIZE_BSSBRK;

        if (memcntl(0, 0, MC_HAT_ADVISE, (caddr_t)&arg, 0, 0) == -1) {
            fprintf(stderr, "Failed to set large pages: %s\n",
                    strerror(errno));
            fprintf(stderr, "Will use default page size\n");
        } else {
            ret = 0;
        }
    } else {
        fprintf(stderr, "Failed to get supported pagesizes: %s\n",
                strerror(errno));
        fprintf(stderr, "Will use default page size\n");
    }
    return ret;
#else
    return -1;
#endif
}
/*
 * bigheap(): best-effort request that the heap (BSS/brk segment) be
 * backed by the largest power-of-two page size available, capped by
 * the file-scope `maxpgsize`.  Returns silently on any failure.
 */
static void
bigheap(void)
{
	size_t big, *size;
	int sizes;
	struct memcntl_mha mha;

	/*
	 * First, get the available pagesizes.
	 */
	if ((sizes = getpagesizes(NULL, 0)) == -1)
		return;

	/* One page size means nothing to gain; alloca the size array. */
	if (sizes == 1 || (size = alloca(sizeof (size_t) * sizes)) == NULL)
		return;

	if (getpagesizes(size, sizes) == -1)
		return;

	/*
	 * Drop sizes above the cap.
	 * NOTE(review): assumes getpagesizes() returns sizes in ascending
	 * order and that at least one size is <= maxpgsize; otherwise this
	 * loop would walk off the front of the array -- confirm.
	 */
	while (size[sizes - 1] > maxpgsize)
		sizes--;

	/* set big to the largest allowed page size */
	big = size[sizes - 1];
	if (big & (big - 1)) {
		/*
		 * The largest page size is not a power of two for some
		 * inexplicable reason; return.
		 */
		return;
	}

	/*
	 * Now, align our break to the largest page size.
	 */
	if (brk((void *)((((uintptr_t)sbrk(0) - 1) & ~(big - 1)) + big)) != 0)
		return;

	/*
	 * set the preferred page size for the heap
	 */
	mha.mha_cmd = MHA_MAPSIZE_BSSBRK;
	mha.mha_flags = 0;
	mha.mha_pagesize = big;

	(void) memcntl(NULL, 0, MC_HAT_ADVISE, (caddr_t)&mha, 0, 0);
}
/*
 * Regression test for memcntl(MC_INHERIT_ZERO): after the mapping is
 * marked inherit-zero, each forked child must see the pages zero-filled
 * while the parent's written pattern survives the fork.
 * NOTE(review): `nchild` is a file-scope counter defined outside this
 * view; presumably it is initialized to the number of fork iterations
 * to run -- confirm against the rest of the file.
 */
int
main(void)
{
	void *buf;
	pid_t child;
	int ret, i;
	siginfo_t info;
	uint8_t *ubuf;
	size_t mapsz = sysconf(_SC_PAGESIZE) * 2;

	/* Two-page anonymous private mapping. */
	buf = mmap(NULL, mapsz, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	assert(buf != MAP_FAILED);

	/* Children should inherit this range zeroed, not copied. */
	ret = memcntl(buf, mapsz, MC_INHERIT_ZERO, 0, 0, 0);
	assert(ret == 0);

again:
	/* Stamp a distinct byte pattern before each fork. */
	memset(buf, 'a' + nchild, mapsz);
	child = fork();
	if (child == 0) {
		nchild--;
		/* Child must see only zeros despite the parent's memset. */
		for (i = 0, ubuf = buf; i < mapsz; i++)
			assert(ubuf[i] == 0);
		/* Each child forks the next generation in turn. */
		if (nchild != 0)
			goto again;
		exit(0);
	}
	assert(child != -1);

	/* Reap the child, retrying interrupted waits. */
	do {
		ret = waitid(P_PID, child, &info, WEXITED);
	} while (ret == -1 && errno == EINTR);
	assert(ret == 0);
	assert(info.si_pid == child);
	assert(info.si_status == 0);

	/* Parent's view of the mapping must be unchanged by the fork. */
	for (i = 0, ubuf = buf; i < mapsz; i++)
		assert(ubuf[i] == 'a' + nchild);

	return (0);
}
/*
 * Library initializer for the madvise interposer: read advice settings
 * from the environment (ENV_MADV, ENV_MADVCFGFILE, ENV_MADVERRFILE,
 * ENV_MADVDEBUG) and from a configuration file, resolve precedence into
 * the advice_* globals, then apply any heap advice to the existing heap
 * via memcntl(MC_ADVISE).  Errors go through madverr() to an optional
 * private error file or, failing that, syslog.
 */
static void
__madvmain()
{
	char *cfgfile, *errfile;
	FILE *fp = NULL;
	const char *execname;
	char *cwd;
	int cwdlen;
	char *tok, *tokadv, *tokarg;
	char *str, *envadv;
	int lineno = 0;
	int advice;
	uintptr_t brkbase, brkend;
	size_t brksize;
	int rc;
	char *locale;

	/*
	 * If a private error file is indicated then set the locale
	 * for error messages for the duration of this routine.
	 * Error messages destined for syslog should not be translated
	 * and thus come from the default C locale.
	 */
	if ((errfile = getenv(ENV_MADVERRFILE)) != NULL) {
		errfp = fopen(errfile, "aF");
		if (errfp) {
			locale = setlocale(LC_MESSAGES, "");
		} else {
			madverr(NULL, dgettext(TEXT_DOMAIN,
			    "%s: cannot open error file: %s [%s]\n"),
			    madvident, errfile, strerror(errno));
		}
	}

#ifdef MADVDEBUG
	if (str = getenv(ENV_MADVDEBUG))
		madvdebug = atoi(str);
#endif

	/* Global advice from the environment; cfgfile may override it. */
	if (envadv = getenv(ENV_MADV)) {
		if ((advice = strtoadv(envadv)) >= 0)
			advice_all = advice;
		else
			madverr(errfp, dgettext(TEXT_DOMAIN,
			    "%s: invalid advice specified: MADV=%s\n"),
			    madvident, envadv);
	}

	/*
	 * Open specified cfg file or default one.
	 */
	if (cfgfile = getenv(ENV_MADVCFGFILE)) {
		fp = fopen(cfgfile, "rF");
		if (!fp) {
			madverr(errfp, dgettext(TEXT_DOMAIN,
			    "%s: cannot open configuration file: %s [%s]\n"),
			    madvident, cfgfile, strerror(errno));
		}
	} else {
		cfgfile = DEF_MADVCFGFILE;
		fp = fopen(cfgfile, "rF");
	}

	if (fp) {
		execname = mygetexecname();
		cwd = getcwd(pbuf, MAXPATHLEN);
		if (!cwd)
			return;
		/* Trailing "/" so cwd can be prepended to relative names. */
		cwd = strcat(cwd, "/");
		cwdlen = strlen(cwd);

		while (fgets(lbuf, MAXLINELEN, fp)) {
			lineno++;

			/*
			 * Make sure line wasn't truncated.
			 */
			if (strlen(lbuf) >= MAXLINELEN - 1) {
				madverr(errfp, dgettext(TEXT_DOMAIN,
				    "%s: invalid entry, "
				    "line too long - cfgfile:"
				    " %s, line: %d\n"),
				    madvident, cfgfile, lineno);
				continue;
			}

			if (empty(lbuf))
				continue;

			/*
			 * Get advice options.
			 * Parse right to left in case delimiter is in name.
			 */
			if (!(tokadv = strrchr(lbuf, CFGDELIMITER))) {
				madverr(errfp, dgettext(TEXT_DOMAIN,
				    "%s: no delimiter specified - cfgfile:"
				    " %s, line: %d\n"),
				    madvident, cfgfile, lineno);
				continue;
			}
			*tokadv++ = '\0';

			/*
			 * Remove newline from end of advice options.
			 */
			if (str = strrchr(tokadv, '\n'))
				*str = '\0';

			/*
			 * Get optional argument string.
			 */
			if (tokarg = strrchr(lbuf, ARGDELIMITER)) {
				*tokarg++ = '\0';
			}

			/*
			 * Compare exec name.
			 */
			tok = lbuf;
			if (!fnmatch(execname, tok, cwd)) {
				/* No match: reset and restore cwd buffer. */
				tokadv = tokarg = NULL;
				cwd[cwdlen] = '\0';
				continue;
			}

			/*
			 * Compare arguments if argument string specified.
			 */
			if (tokarg && !empty(tokarg) && !argmatch(tokarg)) {
				tokadv = tokarg = NULL;
				cwd[cwdlen] = '\0';
				continue;
			}

			/*
			 * Parse advice options.
			 * If empty, any advice from ENV_MADV is reset.
			 */
			if (empty(tokadv)) {
				advice_all = -1;
			} else {
				advice_opts(tokadv, execname, cfgfile, lineno);
			}
			/* First matching entry wins. */
			break;
		}
		(void) fclose(fp);
	}

	/*
	 * Pagesize needed for proper aligning by brk interpose.
	 */
	pagesize = sysconf(_SC_PAGESIZE);

	/*
	 * Apply global advice if set.
	 * Specific options in the cfgfile take precedence.
	 */
	if (advice_all >= 0) {
		if (advice_heap < 0)
			advice_heap = advice_all;
		if (advice_shm < 0)
			advice_shm = advice_all;
		if (advice_map < 0)
			advice_map = advice_all;
	}
	MADVPRINT(2, (stderr, "advice_all %d\n", advice_all));
	MADVPRINT(2, (stderr, "advice_heap %d\n", advice_heap));
	MADVPRINT(2, (stderr, "advice_shm %d\n", advice_shm));
	MADVPRINT(2, (stderr, "advice_ism %d\n", advice_ism));
	MADVPRINT(2, (stderr, "advice_dism %d\n", advice_dism));
	MADVPRINT(2, (stderr, "advice_map %d\n", advice_map));
	MADVPRINT(2, (stderr, "advice_mapshared %d\n", advice_mapshared));
	MADVPRINT(2, (stderr, "advice_mapprivate %d\n", advice_mapprivate));
	MADVPRINT(2, (stderr, "advice_mapanon %d\n", advice_mapanon));

	/*
	 * If heap advice is specified, apply it to the existing heap.
	 * As the heap grows the kernel applies the advice automatically
	 * to new portions of the heap.
	 */
	if (advice_heap >= 0) {
		if (rc = mygetbrk(&brkbase, &brksize)) {
			madverr(errfp, dgettext(TEXT_DOMAIN,
			    "%s: /proc/self/status read failed [%s]\n"),
			    madvident, strerror(rc));
		} else {
			MADVPRINT(4, (stderr, "brkbase 0x%x brksize 0x%x\n",
			    brkbase, brksize));
			/*
			 * Align start address for memcntl and apply advice
			 * on full pages of heap. Create a page of heap if
			 * it does not already exist.
			 */
			brkend = roundup(brkbase+brksize, pagesize);
			brkbase = roundup(brkbase, pagesize);
			brksize = brkend - brkbase;
			if (brksize < pagesize) {
				if (sbrk(pagesize) == (void *)-1) {
					madverr(errfp, dgettext(TEXT_DOMAIN,
					    "%s: sbrk failed [%s]\n"),
					    madvident, strerror(errno));
					goto out;
				}
				brksize = pagesize;
			}
			MADVPRINT(1, (stderr, "heap advice: 0x%x 0x%x %d\n",
			    brkbase, brksize, advice_heap));
			if (memcntl((caddr_t)brkbase, brksize, MC_ADVISE,
			    (caddr_t)(intptr_t)advice_heap, 0, 0) < 0) {
				madverr(errfp, dgettext(TEXT_DOMAIN,
				    "%s: memcntl() failed [%s]: heap advice\n"),
				    madvident, strerror(errno));
			}
		}
	}
out:
	if (errfp) {
		(void) fclose(errfp);
		(void) setlocale(LC_MESSAGES, locale);
	} else {
		/* close log file: no-op if nothing logged to syslog */
		closelog();
	}
}
/*
 * munlock(): unlock the address range [addr, addr + len), implemented
 * as memcntl() with the MC_UNLOCK command.  The range is not modified;
 * the cast only drops const for memcntl()'s prototype.
 */
int
munlock(const void *addr, size_t len)
{
	return memcntl((void *)addr, len, MC_UNLOCK, 0, 0, 0);
}
static void *alloc_hugetlb(void *address){ void *map_address = (void *)-1; #if defined(OS_LINUX) || defined(OS_AIX) int shmid; shmid = shmget(IPC_PRIVATE, BUFFER_SIZE, #ifdef OS_LINUX SHM_HUGETLB | #endif #ifdef OS_AIX SHM_LGPAGE | SHM_PIN | #endif IPC_CREAT | SHM_R | SHM_W); if (shmid != -1) { map_address = (void *)shmat(shmid, address, SHM_RND); #ifdef OS_LINUX my_mbind(map_address, BUFFER_SIZE, MPOL_PREFERRED, NULL, 0, 0); #endif if (map_address != (void *)-1){ shmctl(shmid, IPC_RMID, 0); } } #endif #ifdef __sun__ struct memcntl_mha mha; mha.mha_cmd = MHA_MAPSIZE_BSSBRK; mha.mha_flags = 0; mha.mha_pagesize = HUGE_PAGESIZE; memcntl(NULL, 0, MC_HAT_ADVISE, (char *)&mha, 0, 0); map_address = (BLASULONG)memalign(HUGE_PAGESIZE, BUFFER_SIZE); #endif #ifdef OS_WINDOWS HANDLE hToken; TOKEN_PRIVILEGES tp; if (OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES, &hToken) != TRUE) return (void *) -1; tp.PrivilegeCount = 1; tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; if (LookupPrivilegeValue(NULL, SE_LOCK_MEMORY_NAME, &tp.Privileges[0].Luid) != TRUE) return (void *) -1; if (AdjustTokenPrivileges(hToken, FALSE, (PTOKEN_PRIVILEGES)&tp, 0, NULL, NULL) != TRUE) return (void *) -1; map_address = (void *)VirtualAlloc(address, BUFFER_SIZE, MEM_LARGE_PAGES | MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); AdjustTokenPrivileges(hToken, TRUE, &tp, 0, (PTOKEN_PRIVILEGES)NULL, NULL); if (map_address == (void *)NULL) map_address = (void *)-1; #endif if (map_address != (void *)-1){ release_info[release_pos].address = map_address; release_info[release_pos].func = alloc_hugetlb_free; release_pos ++; } return map_address; }
int madvise (__ptr_t addr, size_t len, int advice) { return memcntl (addr, len, MC_ADVISE, (caddr_t)(intptr_t)advice, 0, 0); }
/*
 * pr_memcntl(): perform a memcntl() on behalf of the process controlled
 * by Pr.  With no subject process (Pr == NULL) this is an ordinary
 * memcntl() in the current process; otherwise the six arguments are
 * marshalled into argdes_t descriptors and the syscall is injected into
 * the target via Psyscall().  Returns 0 on success, -1 with errno set
 * (ENOSYS when Psyscall reports an internal failure).
 */
int
pr_memcntl(struct ps_prochandle *Pr,
	caddr_t addr, size_t len, int cmd, caddr_t arg, int attr, int mask)
{
	sysret_t rval;		/* return value from memcntl() */
	argdes_t argd[6];	/* arg descriptors for memcntl() */
	argdes_t *adp;
	int error;

	if (Pr == NULL)		/* no subject process */
		return (memcntl(addr, len, cmd, arg, attr, mask));

	adp = &argd[0];		/* addr argument */
	adp->arg_value = (uintptr_t)addr;
	adp->arg_object = NULL;
	adp->arg_type = AT_BYVAL;
	adp->arg_inout = AI_INPUT;
	adp->arg_size = 0;

	adp++;			/* len argument */
	adp->arg_value = len;
	adp->arg_object = NULL;
	adp->arg_type = AT_BYVAL;
	adp->arg_inout = AI_INPUT;
	adp->arg_size = 0;

	adp++;			/* cmd argument */
	adp->arg_value = cmd;
	adp->arg_object = NULL;
	adp->arg_type = AT_BYVAL;
	adp->arg_inout = AI_INPUT;
	adp->arg_size = 0;

	adp++;			/* arg argument */
	if (cmd == MC_HAT_ADVISE) {
		/*
		 * MC_HAT_ADVISE passes a struct memcntl_mha by reference;
		 * its size depends on the target's data model (ILP32
		 * targets under an LP64 tracer use the 32-bit layout).
		 */
		adp->arg_value = 0;
		adp->arg_object = arg;
		adp->arg_type = AT_BYREF;
		adp->arg_inout = AI_INPUT;
#ifdef _LP64
		if (Pstatus(Pr)->pr_dmodel == PR_MODEL_ILP32)
			adp->arg_size = sizeof (struct memcntl_mha32);
		else
			adp->arg_size = sizeof (struct memcntl_mha);
#else
		adp->arg_size = sizeof (struct memcntl_mha);
#endif
	} else {
		/* All other commands pass arg by value. */
		adp->arg_value = (uintptr_t)arg;
		adp->arg_object = NULL;
		adp->arg_type = AT_BYVAL;
		adp->arg_inout = AI_INPUT;
		adp->arg_size = 0;
	}

	adp++;			/* attr argument */
	adp->arg_value = attr;
	adp->arg_object = NULL;
	adp->arg_type = AT_BYVAL;
	adp->arg_inout = AI_INPUT;
	adp->arg_size = 0;

	adp++;			/* mask argument */
	adp->arg_value = mask;
	adp->arg_object = NULL;
	adp->arg_type = AT_BYVAL;
	adp->arg_inout = AI_INPUT;
	adp->arg_size = 0;

	error = Psyscall(Pr, &rval, SYS_memcntl, 6, &argd[0]);

	if (error) {
		errno = (error > 0)? error : ENOSYS;
		return (-1);
	}
	return (0);
}
/*
 * Allocate the buffer pool with plain anonymous mmap(): reserve an
 * over-sized PROT_NONE region, align its start by hand to the largest
 * page size, grant R/W on the usable portion with mprotect(), and --
 * where memcntl() exists -- advise the kernel to back the range with
 * the maximum page size.  On success buf_start points at the aligned
 * buffer of `size` bytes; returns RCOK, or fcMMAPFAILED on error.
 */
w_rc_t
sthread_t::set_bufsize_normal(
    size_t size,
    char *&buf_start /* in/out*/,
    long system_page_size)
{
    size_t requested_size = size; // save for asserts later

    // ***********************************************************
    //
    //  GET PAGE SIZES
    //
    //  If the SM pagesize is larger than the largest system page size,
    //  align everything on the former (safe and is less confusing).
    //
    // ***********************************************************
    long max_page_size = get_max_page_size(system_page_size);
    w_assert1(system_page_size <= max_page_size);
    long align_page_size =
        (SM_PAGESIZE > max_page_size)? SM_PAGESIZE : max_page_size;

    // ***********************************************************
    //
    //  GET FILE DESCRIPTOR FOR MMAP
    //
    // ***********************************************************
    int fd(-1); // must be -1 if not mapping to a file

    // ***********************************************************
    //
    //  GET FLAGS FOR MMAP
    //
    //  If posix mmapped file are available, _POSIX_MAPPED_FILES is defined
    //  in <unistd.h> to be > 0
    //
    //  That should give you these flags:
    //  MAP_FIXED, MAP_PRIVATE, MAP_NORESERVE, MAP_ANONYMOUS
    //  If MAP_ANONYMOUS is not there, MAP_ANON might be.
    //
    //  However... systems aren't exactly in sync here, so configure.ac
    //  checks for each of these flags.
    //
    // ***********************************************************
    int flags1 = MAP_PRIVATE;
    size_t extra_align = align_page_size;
    size_t align_arg = 0;

#if HAVE_DECL_MAP_ANONYMOUS==1
    flags1 |= MAP_ANONYMOUS;
#elif HAVE_DECL_MAP_ANON==1
    flags1 |= MAP_ANON;
#else
#endif

#if HAVE_DECL_MAP_NORESERVE==1
    flags1 |= MAP_NORESERVE;
#endif

#if HAVE_DECL_MAP_ALIGN==1
    // With MAP_ALIGN the kernel aligns for us: no extra slack needed,
    // and the alignment is passed through the addr argument instead.
    flags1 |= MAP_ALIGN;
    extra_align = 0;
    align_arg = align_page_size;
#endif

    // add the extra alignment to the size requested before alignment,
    // and then do our own alignment at the end  In the case of
    // MAP_ALIGN this is unnecessary, and the extra alignment is zero.
    size += extra_align;
    align_bufsize(size, system_page_size, align_page_size);

    // ***********************************************************
    //
    //  FIRST MMAP: get a mapped region from the kernel.
    //  If we are using hugetlbfs, fd will be >= 0 and
    //  we won't have to do the remap -- the first mapping will
    //  give us the best page sizes we can get. In that case,
    //  skip the first mmap and do exactly one "second mmap"
    //
    // ***********************************************************
    errno = 0;
    _disk_buffer = (char*) mmap((char*)align_arg, _disk_buffer_size,
                                PROT_NONE,
                                flags1,
                                fd, /* fd */
                                0 /* off_t */
                                );

    if (_disk_buffer == MAP_FAILED) {
        std::cerr
            << __LINE__ << " "
            << "mmap (size=" << _disk_buffer_size
            << " = " << int(_disk_buffer_size/1024)
            << " KB ) returns " << long(_disk_buffer)
            << " errno is " << errno << " " << strerror(errno)
            << " flags " << flags1
            << " fd " << fd
            << std::endl;
        return RC(fcMMAPFAILED);
    }
#if W_DEBUG_LEVEL > 4
    else {
        std::cerr
            << __LINE__ << " "
            << "mmap SUCCESS! (size=" << _disk_buffer_size
            << " = " << int(_disk_buffer_size/1024)
            << " KB ) returns " << long(_disk_buffer)
            << " errno is " << errno << " " << strerror(errno)
            << " flags " << flags1
            << " fd " << fd
            << std::endl;
    }
#endif

    // ***********************************************************
    //
    //  RE-MMAP: manually align the region and give the useful part R/W
    //  permissions.
    //
    // ***********************************************************
    _disk_buffer = (char*)alignon(_disk_buffer, align_page_size);
    // NOTE(review): the result of this alignon() call is discarded;
    // presumably requested_size is meant to be rounded here -- confirm
    // whether alignon() takes its argument by reference.
    alignon(requested_size, system_page_size);

    if (mprotect(_disk_buffer, requested_size, PROT_READ|PROT_WRITE)) {
        std::cerr
            << __LINE__ << " "
            << "mprotect (addr=" << long(_disk_buffer)
            << ", size=" << requested_size << ") returns -1;"
            << " errno is " << errno << " " << strerror(errno)
            << std::endl;
        do_unmap();
        return RC(fcMMAPFAILED);
    }

#ifdef HAVE_MEMCNTL
    struct memcntl_mha info;
    info.mha_cmd = MHA_MAPSIZE_VA;
    info.mha_flags = 0;
    info.mha_pagesize = max_page_size;
    // Ask the kernel to use the max page size here
    if(memcntl(_disk_buffer, requested_size, MC_HAT_ADVISE,
               (char *)&info, 0, 0) < 0) {
        // Failure to get large pages is not fatal; just report it.
        std::cerr
            << "memcntl returns -1;"
            << " errno is " << errno << " " << strerror(errno)
            << " requested size " << max_page_size
            << std::endl;
    }
#endif

    align_for_sm(requested_size);
    buf_start = _disk_buffer;
    clear(buf_start, requested_size);
    return RCOK;
}
/*
 * plock(): System V memory-locking interface, layered on memcntl(),
 * mlockall() and munlockall().  op is bit-encoded: UNLOCK removes all
 * locks, TXTLOCK locks text, DATLOCK locks data/stack plus all future
 * growth, PROCLOCK locks everything current and future.  Per-process
 * lock state is tracked in lock_state/state_pid under plock_lock.
 * Returns 0 on success, -1 with errno set on failure.
 */
int
plock(int op)			/* desired operation */
{
	int e;			/* return value */
	pid_t pid;		/* current pid */

	lmutex_lock(&plock_lock);

	/*
	 * Validate state of lock's. If parent has forked, then
	 * the lock state needs to be reset (children do not inherit
	 * memory locks, and thus do not inherit their state).
	 */
	if ((pid = getpid()) != state_pid) {
		lock_state = 0;
		state_pid = pid;
	}

	/*
	 * Dispatch on operation. Note: plock and its relatives depend
	 * upon "op" being bit encoded.
	 */
	switch (op) {

	/*
	 * UNLOCK: remove all memory locks. Requires that some be set!
	 */
	case UNLOCK:
		if (lock_state == 0) {
			errno = EINVAL;
			lmutex_unlock(&plock_lock);
			return (-1);
		}
		e = munlockall();
		if (e) {
			lmutex_unlock(&plock_lock);
			return (-1);
		} else {
			lock_state = 0;
			lmutex_unlock(&plock_lock);
			return (0);
		}
		/*NOTREACHED*/

	/*
	 * TXTLOCK: locks text segments.
	 */
	case TXTLOCK:

		/*
		 * If a text or process lock is already set, then fail.
		 */
		if ((lock_state & TXTLOCK) || (lock_state & PROCLOCK)) {
			errno = EINVAL;
			lmutex_unlock(&plock_lock);
			return (-1);
		}

		/*
		 * Try to apply the lock(s). If a failure occurs,
		 * memcntl backs them out automatically.
		 */
		e = memcntl(NULL, 0, MC_LOCKAS, (caddr_t)MCL_CURRENT,
		    PROC_TEXT|PRIVATE, (int)NULL);
		if (!e)
			lock_state |= TXTLOCK;
		lmutex_unlock(&plock_lock);
		return (e);
		/*NOTREACHED*/

	/*
	 * DATLOCK: locks data segment(s), including the stack and all
	 * future growth in the address space.
	 */
	case DATLOCK:

		/*
		 * If a data or process lock is already set, then fail.
		 */
		if ((lock_state & DATLOCK) || (lock_state & PROCLOCK)) {
			errno = EINVAL;
			lmutex_unlock(&plock_lock);
			return (-1);
		}

		/*
		 * Try to lock the data and stack segments. On failure
		 * memcntl undoes the locks internally.
		 */
		e = memcntl(NULL, 0, MC_LOCKAS, (caddr_t)MCL_CURRENT,
		    PROC_DATA|PRIVATE, (int)NULL);
		if (e) {
			lmutex_unlock(&plock_lock);
			return (-1);
		}

		/* try to set a lock for all future mappings. */
		e = mlockall(MCL_FUTURE);

		/*
		 * If failures have occurred, back out the locks
		 * and return failure.
		 */
		if (e) {
			/* Preserve mlockall()'s errno across the undo. */
			e = errno;
			(void) memcntl(NULL, 0, MC_UNLOCKAS,
			    (caddr_t)MCL_CURRENT, PROC_DATA|PRIVATE,
			    (int)NULL);
			errno = e;
			lmutex_unlock(&plock_lock);
			return (-1);
		}

		/*
		 * Data, stack, and growth have been locked. Set state
		 * and return success.
		 */
		lock_state |= DATLOCK;
		lmutex_unlock(&plock_lock);
		return (0);
		/*NOTREACHED*/

	/*
	 * PROCLOCK: lock everything, and all future things as well.
	 * There should be nothing locked when this is called.
	 */
	case PROCLOCK:
		if (lock_state) {
			errno = EINVAL;
			lmutex_unlock(&plock_lock);
			return (-1);
		}
		if (mlockall(MCL_CURRENT | MCL_FUTURE) == 0) {
			lock_state |= PROCLOCK;
			lmutex_unlock(&plock_lock);
			return (0);
		} else {
			lmutex_unlock(&plock_lock);
			return (-1);
		}
		/*NOTREACHED*/

	/*
	 * Invalid operation.
	 */
	default:
		errno = EINVAL;
		lmutex_unlock(&plock_lock);
		return (-1);
		/*NOTREACHED*/
	}
}
int mlockall(int flags) { return (memcntl(0, 0, MC_LOCKAS, (caddr_t)(uintptr_t)flags, 0, 0)); }