/* read HTTP request from sockfd, parse it into command
 * and its parameters (for instance, command='udp' and
 * parameters being '192.168.0.1:5002')
 */
static int
read_command( int sockfd, struct server_ctx *srv )
{
#define DBUF_SZ 2048  /* max size for raw data with HTTP request */
#define RBUF_SZ 512   /* max size for url-derived request */
    char httpbuf[ DBUF_SZ ] = "\0",
         request[ RBUF_SZ ] = "\0";
    ssize_t hlen;
    size_t  rlen;
    int rc = 0;

    assert( (sockfd > 0) && srv );

    TRACE( (void)tmfprintf( g_flog, "Reading command from socket [%d]\n",
                sockfd ) );

    /* ..workaround VLC behavior: wait for receiving entire HTTP request,
     * one packet per line */
    usleep(50);

    /* leave room for a NUL terminator: httpbuf is printed as a string below */
    hlen = recv( sockfd, httpbuf, sizeof(httpbuf) - 1, 0 );
    if( 0 > hlen ) {
        rc = errno;
        if( !no_fault(rc) )
            mperror(g_flog, rc, "%s - recv (%d)", __func__, rc);
        else {
            TRACE( mperror(g_flog, rc, "%s - recv (%d)", __func__, rc) );
        }
        return rc;
    }
    if( 0 == hlen ) {
        (void) tmfprintf( g_flog, "%s: client closed socket [%d]\n",
                __func__, sockfd );
        return 1;
    }

    /* DEBUG - re-enable if needed */
    TRACE( (void)tmfprintf( g_flog, "HTTP buffer [%ld bytes] received\n%s",
                (long)hlen, httpbuf ) );
    /* TRACE( (void) save_buffer( httpbuf, hlen, "/tmp/httpbuf.dat" ) ); */

    rlen = sizeof(request);
    rc = get_request( httpbuf, (size_t)hlen, request, &rlen );
    if (rc) return rc;

    TRACE( (void)tmfprintf( g_flog, "Request=[%s], length=[%lu]\n",
                request, (u_long)rlen ) );

    rc = parse_auth( httpbuf, (size_t)hlen );
    TRACE( (void)tmfprintf( g_flog, "Auth result=[%d]\n", rc ) );
    if (rc) return rc;

    (void) memset( &srv->rq, 0, sizeof(srv->rq) );
    rc = parse_param( request, rlen, srv->rq.cmd, sizeof(srv->rq.cmd),
            srv->rq.param, sizeof(srv->rq.param),
            srv->rq.tail, sizeof(srv->rq.tail) );
    if( 0 == rc ) {
        TRACE( (void)tmfprintf( g_flog, "Command [%s] with params [%s],"
                    " tail [%s] read from socket=[%d]\n",
                    srv->rq.cmd, srv->rq.param, srv->rq.tail, sockfd) );
    }

    return rc;
}
/* write buffer to designated socket/file
 */
ssize_t
write_buf( int fd, const char* data, const ssize_t len, FILE* log )
{
    ssize_t n = 0, nwr = 0, error = IO_ERR;
    int err = 0;

    for( n = 0; errno = 0, n < len ; ) {
        nwr = write( fd, &(data[n]), len - n );
        if( nwr <= 0 ) {
            err = errno;
            if( EINTR == err ) {
                TRACE( (void)tmfprintf( log, "%s interrupted\n", __func__ ) );
                continue;
            }
            else {
                if( would_block(err) )
                    error = IO_BLK;
                break;
            }
        }

        n += nwr;

        if( nwr != len ) {
            if( NULL != log ) {
                TRACE( (void)tmfprintf( log,
                        "Fragment written %s[%ld:%ld]/[%ld] bytes\n",
                        (len > n ? "P" : "F"),
                        (long)nwr, (long)n, (long)len ) );
            }
        }
    }

    if( nwr <= 0 ) {
        if( log ) {
            if (IO_BLK == error)
                (void)tmfprintf( log, "%s: socket time-out on write",
                        __func__);
            else if( !no_fault(err) || g_uopt.is_verbose )
                mperror( log, err, "%s: write", __func__ ); /* saved errno */
        }
        return error;
    }

    return n;
}
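/* Illustrative only (not in the original source): a caller that pushes a
 * buffer through write_buf() and maps its return values.  This assumes
 * IO_ERR and IO_BLK are negative sentinels, distinct from any valid byte
 * count; drain_example() itself is a hypothetical name.
 */
static int
drain_example( int fd, const char* buf, ssize_t len, FILE* log )
{
    ssize_t rc = write_buf( fd, buf, len, log );

    if( rc == len )
        return 0;       /* fully written */
    if( IO_BLK == rc )
        return 1;       /* send time-out: retry-able */
    return -1;          /* IO_ERR: treat as fatal */
}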
/* read data chunk of designated size (or less) into buffer
 * (will *NOT* attempt to re-read if read less than expected
 *  w/o interruption)
 */
ssize_t
read_buf( int fd, char* data, const ssize_t len, FILE* log )
{
    ssize_t n = 0, nrd = 0, err = 0;

    for( n = 0; errno = 0, n < len ; ) {
        nrd = read( fd, &(data[n]), len - n );
        if( nrd <= 0 ) {
            err = errno;
            if( EINTR == err ) {
                TRACE( (void)tmfprintf( log, "%s interrupted\n", __func__ ) );
                errno = 0;
                continue;
            }
            else {
                break;
            }
        }

        n += nrd;

        /*
        if( nrd != len ) {
            if( NULL != log ) {
                TRACE( (void)tmfprintf( log,
                        "Fragment read [%ld]/[%ld] bytes\n",
                        (long)nrd, (long)len ) );
            }
        }
        */

        /* we only read as much as we can read at once (uninterrupted) */
        break;
    }

    if( nrd < 0 ) {
        if( log ) {
            if( would_block(err) )
                (void)tmfprintf( log, "%s: socket time-out on read",
                        __func__);
            else if( !no_fault(err) || g_uopt.is_verbose )
                mperror( log, err, "%s: read", __func__ ); /* saved errno */
        }
    }

    return n;
}
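/* Illustrative only (not in the original source): a minimal relay loop built
 * on read_buf()/write_buf().  The buffer size and the name relay_example()
 * are hypothetical.  Note that read_buf() does not retry short reads and
 * returns the count actually read, or 0 on EOF/error (errors are reported
 * to the log rather than the return value).
 */
static int
relay_example( int src, int dst, FILE* log )
{
    char buf[ 4096 ];
    ssize_t nrd, nwr;

    for (;;) {
        nrd = read_buf( src, buf, (ssize_t)sizeof(buf), log );
        if( nrd <= 0 )
            return 0;       /* EOF or read error (logged by read_buf) */

        nwr = write_buf( dst, buf, nrd, log );
        if( nwr != nrd )
            return -1;      /* I/O error or send time-out */
    }
}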
static int
watch_suword64(void *addr, uint64_t value)
{
	klwp_t *lwp = ttolwp(curthread);
	int watchcode;
	caddr_t vaddr;
	int mapped;
	int rv = 0;
	int ta;
	label_t ljb;

	for (;;) {
		vaddr = (caddr_t)addr;
		watchcode = pr_is_watchpoint(&vaddr, &ta, sizeof (value),
		    NULL, S_WRITE);
		if (watchcode == 0 || ta != 0) {
			mapped = pr_mappage((caddr_t)addr, sizeof (value),
			    S_WRITE, 1);
			if (on_fault(&ljb))
				rv = -1;
			else
				suword64_noerr(addr, value);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)addr, sizeof (value),
				    S_WRITE, 1);
		}
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			rv = -1;
			break;
		}
		if (watchcode == 0 || ta != 0)
			break;
	}
	return (rv);
}
/* send HTTP response to socket
 */
static int
send_http_response( int sockfd, int code, const char* reason )
{
    static char msg[ 3072 ];
    ssize_t nsent;
    int msglen;     /* snprintf returns int; keep it signed so the
                     * error check below actually works */
    static const char CONTENT_TYPE[] =
        "Content-Type:application/octet-stream";
    int err = 0;

    assert( (sockfd > 0) && code && reason );

    msg[0] = '\0';
    if ((200 == code) && g_uopt.h200_ftr[0]) {
        msglen = snprintf( msg, sizeof(msg) - 1,
                "HTTP/1.1 %d %s\r\nServer: %s\r\n%s\r\n%s\r\n\r\n",
                code, reason, g_app_info, CONTENT_TYPE, g_uopt.h200_ftr );
    }
    else {
        msglen = snprintf( msg, sizeof(msg) - 1,
                "HTTP/1.1 %d %s\r\nServer: %s\r\n%s\r\n\r\n",
                code, reason, g_app_info, CONTENT_TYPE );
    }
    if( msglen <= 0 ) return ERR_INTERNAL;

    nsent = send( sockfd, msg, (size_t)msglen, 0 );
    if( -1 == nsent ) {
        err = errno;
        if( !no_fault(err) )
            mperror(g_flog, err, "%s - send", __func__);
        else {
            TRACE( mperror(g_flog, err, "%s - send", __func__) );
        }
        return ERR_INTERNAL;
    }

    TRACE( (void)tmfprintf( g_flog, "Sent HTTP response code=[%d], "
                "reason=[%s] to socket=[%d]\n%s\n",
                code, reason, sockfd, msg) );
    return 0;
}
/*
 * Generic form of watch_fuword8(), watch_fuword16(), etc.
 */
static int
watch_fuword(const void *addr, void *dst, fuword_func func, size_t size)
{
	klwp_t *lwp = ttolwp(curthread);
	int watchcode;
	caddr_t vaddr;
	int mapped;
	int rv = 0;
	int ta;
	label_t ljb;

	for (;;) {
		vaddr = (caddr_t)addr;
		watchcode = pr_is_watchpoint(&vaddr, &ta, size, NULL, S_READ);
		if (watchcode == 0 || ta != 0) {
			mapped = pr_mappage((caddr_t)addr, size, S_READ, 1);
			if (on_fault(&ljb))
				rv = -1;
			else
				(*func)(addr, dst);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)addr, size, S_READ, 1);
		}
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			rv = -1;
			break;
		}
		if (watchcode == 0 || ta != 0)
			break;
	}
	return (rv);
}
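/*
 * Illustrative sketch (not shown in this excerpt): the generic
 * watch_fuword() is typically instantiated through thin typed wrappers
 * like the one below.  The watch_fuword32()/fuword32_noerr() pairing is
 * an assumption about the surrounding tree.
 */
static int
watch_fuword32(const void *addr, int32_t *dst)
{
	return (watch_fuword(addr, dst, (fuword_func)fuword32_noerr,
	    sizeof (int32_t)));
}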
int
sendsig(int sig, k_siginfo_t *sip, void (*hdlr)())
{
	volatile int minstacksz;
	int newstack;
	label_t ljb;
	volatile caddr_t sp;
	caddr_t fp;
	struct regs *rp;
	volatile greg_t upc;
	volatile proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	ucontext_t *volatile tuc = NULL;
	ucontext_t *uc;
	siginfo_t *sip_addr;
	volatile int watched;

	rp = lwptoregs(lwp);
	upc = rp->r_pc;

	minstacksz = SA(sizeof (struct sigframe)) + SA(sizeof (*uc));
	if (sip != NULL)
		minstacksz += SA(sizeof (siginfo_t));
	ASSERT((minstacksz & (STACK_ALIGN - 1ul)) == 0);

	/*
	 * Figure out whether we will be handling this signal on
	 * an alternate stack specified by the user.  Then allocate
	 * and validate the stack requirements for the signal handler
	 * context.  on_fault will catch any faults.
	 */
	newstack = sigismember(&PTOU(curproc)->u_sigonstack, sig) &&
	    !(lwp->lwp_sigaltstack.ss_flags & (SS_ONSTACK|SS_DISABLE));

	if (newstack) {
		fp = (caddr_t)(SA((uintptr_t)lwp->lwp_sigaltstack.ss_sp) +
		    SA(lwp->lwp_sigaltstack.ss_size) - STACK_ALIGN);
	} else if ((rp->r_ss & 0xffff) != UDS_SEL) {
		user_desc_t *ldt;
		/*
		 * If the stack segment selector is -not- pointing at
		 * the UDS_SEL descriptor and we have an LDT entry for
		 * it instead, add the base address to find the
		 * effective va.
		 */
		if ((ldt = p->p_ldt) != NULL)
			fp = (caddr_t)rp->r_sp +
			    USEGD_GETBASE(&ldt[SELTOIDX(rp->r_ss)]);
		else
			fp = (caddr_t)rp->r_sp;
	} else
		fp = (caddr_t)rp->r_sp;

	/*
	 * Force proper stack pointer alignment, even in the face of a
	 * misaligned stack pointer from user-level before the signal.
	 * Don't use the SA() macro because that rounds up, not down.
	 */
	fp = (caddr_t)((uintptr_t)fp & ~(STACK_ALIGN - 1ul));
	sp = fp - minstacksz;

	/*
	 * Make sure lwp hasn't trashed its stack.
	 */
	if (sp >= (caddr_t)USERLIMIT || fp >= (caddr_t)USERLIMIT) {
#ifdef DEBUG
		printf("sendsig: bad signal stack cmd=%s, pid=%d, sig=%d\n",
		    PTOU(p)->u_comm, p->p_pid, sig);
		printf("sigsp = 0x%p, action = 0x%p, upc = 0x%lx\n",
		    (void *)sp, (void *)hdlr, (uintptr_t)upc);
		printf("sp above USERLIMIT\n");
#endif
		return (0);
	}

	watched = watch_disable_addr((caddr_t)sp, minstacksz, S_WRITE);

	if (on_fault(&ljb))
		goto badstack;

	if (sip != NULL) {
		zoneid_t zoneid;

		fp -= SA(sizeof (siginfo_t));
		uzero(fp, sizeof (siginfo_t));
		if (SI_FROMUSER(sip) &&
		    (zoneid = p->p_zone->zone_id) != GLOBAL_ZONEID &&
		    zoneid != sip->si_zoneid) {
			k_siginfo_t sani_sip = *sip;

			sani_sip.si_pid = p->p_zone->zone_zsched->p_pid;
			sani_sip.si_uid = 0;
			sani_sip.si_ctid = -1;
			sani_sip.si_zoneid = zoneid;
			copyout_noerr(&sani_sip, fp, sizeof (sani_sip));
		} else
			copyout_noerr(sip, fp, sizeof (*sip));
		sip_addr = (siginfo_t *)fp;

		if (sig == SIGPROF &&
		    curthread->t_rprof != NULL &&
		    curthread->t_rprof->rp_anystate) {
			/*
			 * We stand on our head to deal with
			 * the real time profiling signal.
			 * Fill in the stuff that doesn't fit
			 * in a normal k_siginfo structure.
			 */
			int i = sip->si_nsysarg;

			while (--i >= 0)
				suword32_noerr(&(sip_addr->si_sysarg[i]),
				    (uint32_t)lwp->lwp_arg[i]);
			copyout_noerr(curthread->t_rprof->rp_state,
			    sip_addr->si_mstate,
			    sizeof (curthread->t_rprof->rp_state));
		}
	} else
		sip_addr = NULL;

	/* save the current context on the user stack */
	fp -= SA(sizeof (*tuc));
	uc = (ucontext_t *)fp;
	tuc = kmem_alloc(sizeof (*tuc), KM_SLEEP);
	savecontext(tuc, &lwp->lwp_sigoldmask);
	copyout_noerr(tuc, uc, sizeof (*tuc));
	kmem_free(tuc, sizeof (*tuc));
	tuc = NULL;

	lwp->lwp_oldcontext = (uintptr_t)uc;

	if (newstack) {
		lwp->lwp_sigaltstack.ss_flags |= SS_ONSTACK;
		if (lwp->lwp_ustack)
			copyout_noerr(&lwp->lwp_sigaltstack,
			    (stack_t *)lwp->lwp_ustack, sizeof (stack_t));
	}

	/*
	 * Set up signal handler arguments
	 */
	{
		struct sigframe frame;

		frame.sip = sip_addr;
		frame.ucp = uc;
		frame.signo = sig;
		frame.retaddr = (void (*)())0xffffffff;	/* never return! */
		copyout_noerr(&frame, sp, sizeof (frame));
	}

	no_fault();
	if (watched)
		watch_enable_addr((caddr_t)sp, minstacksz, S_WRITE);

	rp->r_sp = (greg_t)sp;
	rp->r_pc = (greg_t)hdlr;
	rp->r_ps = PSL_USER | (rp->r_ps & PS_IOPL);

	if ((rp->r_cs & 0xffff) != UCS_SEL ||
	    (rp->r_ss & 0xffff) != UDS_SEL) {
		rp->r_cs = UCS_SEL;
		rp->r_ss = UDS_SEL;
	}

	/*
	 * Don't set lwp_eosys here.  sendsig() is called via psig() after
	 * lwp_eosys is handled, so setting it here would affect the next
	 * system call.
	 */
	return (1);

badstack:
	no_fault();
	if (watched)
		watch_enable_addr((caddr_t)sp, minstacksz, S_WRITE);
	if (tuc)
		kmem_free(tuc, sizeof (*tuc));
#ifdef DEBUG
	printf("sendsig: bad signal stack cmd=%s, pid=%d, sig=%d\n",
	    PTOU(p)->u_comm, p->p_pid, sig);
	printf("on fault, sigsp = 0x%p, action = 0x%p, upc = 0x%lx\n",
	    (void *)sp, (void *)hdlr, (uintptr_t)upc);
#endif
	return (0);
}
int
sendsig(int sig, k_siginfo_t *sip, void (*hdlr)())
{
	volatile int minstacksz;
	int newstack;
	label_t ljb;
	volatile caddr_t sp;
	caddr_t fp;
	volatile struct regs *rp;
	volatile greg_t upc;
	volatile proc_t *p = ttoproc(curthread);
	struct as *as = p->p_as;
	klwp_t *lwp = ttolwp(curthread);
	ucontext_t *volatile tuc = NULL;
	ucontext_t *uc;
	siginfo_t *sip_addr;
	volatile int watched;

	/*
	 * This routine is utterly dependent upon STACK_ALIGN being
	 * 16 and STACK_ENTRY_ALIGN being 8.  Let's just acknowledge
	 * that and require it.
	 */
#if STACK_ALIGN != 16 || STACK_ENTRY_ALIGN != 8
#error "sendsig() amd64 did not find the expected stack alignments"
#endif

	rp = lwptoregs(lwp);
	upc = rp->r_pc;

	/*
	 * Since we're setting up to run the signal handler we have to
	 * arrange that the stack at entry to the handler is (only)
	 * STACK_ENTRY_ALIGN (i.e. 8) byte aligned so that when the handler
	 * executes its push of %rbp, the stack realigns to STACK_ALIGN
	 * (i.e. 16) correctly.
	 *
	 * The new sp will point to the sigframe and the ucontext_t.  The
	 * above means that sp (and thus sigframe) will be 8-byte aligned,
	 * but not 16-byte aligned.  ucontext_t, however, contains %xmm regs
	 * which must be 16-byte aligned.  Because of this, for correct
	 * alignment, sigframe must be a multiple of 8-bytes in length, but
	 * not 16-bytes.  This will place ucontext_t at a nice 16-byte
	 * boundary.
	 */

	/* LINTED: logical expression always true: op "||" */
	ASSERT((sizeof (struct sigframe) % 16) == 8);

	minstacksz = sizeof (struct sigframe) + SA(sizeof (*uc));
	if (sip != NULL)
		minstacksz += SA(sizeof (siginfo_t));
	ASSERT((minstacksz & (STACK_ENTRY_ALIGN - 1ul)) == 0);

	/*
	 * Figure out whether we will be handling this signal on
	 * an alternate stack specified by the user.  Then allocate
	 * and validate the stack requirements for the signal handler
	 * context.  on_fault will catch any faults.
	 */
	newstack = sigismember(&PTOU(curproc)->u_sigonstack, sig) &&
	    !(lwp->lwp_sigaltstack.ss_flags & (SS_ONSTACK|SS_DISABLE));

	if (newstack) {
		fp = (caddr_t)(SA((uintptr_t)lwp->lwp_sigaltstack.ss_sp) +
		    SA(lwp->lwp_sigaltstack.ss_size) - STACK_ALIGN);
	} else {
		/*
		 * Drop below the 128-byte reserved region of the stack frame
		 * we're interrupting.
		 */
		fp = (caddr_t)rp->r_sp - STACK_RESERVE;
	}

	/*
	 * Force proper stack pointer alignment, even in the face of a
	 * misaligned stack pointer from user-level before the signal.
	 */
	fp = (caddr_t)((uintptr_t)fp & ~(STACK_ENTRY_ALIGN - 1ul));

	/*
	 * Most of the time during normal execution, the stack pointer
	 * is aligned on a STACK_ALIGN (i.e. 16 byte) boundary.  However,
	 * (for example) just after a call instruction (which pushes
	 * the return address), the callers stack misaligns until the
	 * 'push %rbp' happens in the callee prolog.  So while we should
	 * expect the stack pointer to be always at least STACK_ENTRY_ALIGN
	 * aligned, we should -not- expect it to always be STACK_ALIGN
	 * aligned.  We now adjust to ensure that the new sp is aligned to
	 * STACK_ENTRY_ALIGN but not to STACK_ALIGN.
	 */
	sp = fp - minstacksz;
	if (((uintptr_t)sp & (STACK_ALIGN - 1ul)) == 0) {
		sp -= STACK_ENTRY_ALIGN;
		minstacksz = fp - sp;
	}

	/*
	 * Now, make sure the resulting signal frame address is sane
	 */
	if (sp >= as->a_userlimit || fp >= as->a_userlimit) {
#ifdef DEBUG
		printf("sendsig: bad signal stack cmd=%s, pid=%d, sig=%d\n",
		    PTOU(p)->u_comm, p->p_pid, sig);
		printf("sigsp = 0x%p, action = 0x%p, upc = 0x%lx\n",
		    (void *)sp, (void *)hdlr, (uintptr_t)upc);
		printf("sp above USERLIMIT\n");
#endif
		return (0);
	}

	watched = watch_disable_addr((caddr_t)sp, minstacksz, S_WRITE);

	if (on_fault(&ljb))
		goto badstack;

	if (sip != NULL) {
		zoneid_t zoneid;

		fp -= SA(sizeof (siginfo_t));
		uzero(fp, sizeof (siginfo_t));
		if (SI_FROMUSER(sip) &&
		    (zoneid = p->p_zone->zone_id) != GLOBAL_ZONEID &&
		    zoneid != sip->si_zoneid) {
			k_siginfo_t sani_sip = *sip;

			sani_sip.si_pid = p->p_zone->zone_zsched->p_pid;
			sani_sip.si_uid = 0;
			sani_sip.si_ctid = -1;
			sani_sip.si_zoneid = zoneid;
			copyout_noerr(&sani_sip, fp, sizeof (sani_sip));
		} else
			copyout_noerr(sip, fp, sizeof (*sip));
		sip_addr = (siginfo_t *)fp;

		if (sig == SIGPROF &&
		    curthread->t_rprof != NULL &&
		    curthread->t_rprof->rp_anystate) {
			/*
			 * We stand on our head to deal with
			 * the real time profiling signal.
			 * Fill in the stuff that doesn't fit
			 * in a normal k_siginfo structure.
			 */
			int i = sip->si_nsysarg;

			while (--i >= 0)
				sulword_noerr(
				    (ulong_t *)&(sip_addr->si_sysarg[i]),
				    (ulong_t)lwp->lwp_arg[i]);
			copyout_noerr(curthread->t_rprof->rp_state,
			    sip_addr->si_mstate,
			    sizeof (curthread->t_rprof->rp_state));
		}
	} else
		sip_addr = NULL;

	/*
	 * save the current context on the user stack directly after the
	 * sigframe.  Since sigframe is 8-byte-but-not-16-byte aligned,
	 * and since sizeof (struct sigframe) is 24, this guarantees
	 * 16-byte alignment for ucontext_t and its %xmm registers.
	 */
	uc = (ucontext_t *)(sp + sizeof (struct sigframe));
	tuc = kmem_alloc(sizeof (*tuc), KM_SLEEP);
	no_fault();
	savecontext(tuc, &lwp->lwp_sigoldmask);
	if (on_fault(&ljb))
		goto badstack;
	copyout_noerr(tuc, uc, sizeof (*tuc));
	kmem_free(tuc, sizeof (*tuc));
	tuc = NULL;

	lwp->lwp_oldcontext = (uintptr_t)uc;

	if (newstack) {
		lwp->lwp_sigaltstack.ss_flags |= SS_ONSTACK;
		if (lwp->lwp_ustack)
			copyout_noerr(&lwp->lwp_sigaltstack,
			    (stack_t *)lwp->lwp_ustack, sizeof (stack_t));
	}

	/*
	 * Set up signal handler return and stack linkage
	 */
	{
		struct sigframe frame;

		/*
		 * ensure we never return "normally"
		 */
		frame.retaddr = (caddr_t)(uintptr_t)-1L;
		frame.signo = sig;
		frame.sip = sip_addr;
		copyout_noerr(&frame, sp, sizeof (frame));
	}

	no_fault();
	if (watched)
		watch_enable_addr((caddr_t)sp, minstacksz, S_WRITE);

	/*
	 * Set up user registers for execution of signal handler.
	 */
	rp->r_sp = (greg_t)sp;
	rp->r_pc = (greg_t)hdlr;
	rp->r_ps = PSL_USER | (rp->r_ps & PS_IOPL);

	rp->r_rdi = sig;
	rp->r_rsi = (uintptr_t)sip_addr;
	rp->r_rdx = (uintptr_t)uc;

	if ((rp->r_cs & 0xffff) != UCS_SEL ||
	    (rp->r_ss & 0xffff) != UDS_SEL) {
		/*
		 * Try our best to deliver the signal.
		 */
		rp->r_cs = UCS_SEL;
		rp->r_ss = UDS_SEL;
	}

	/*
	 * Don't set lwp_eosys here.  sendsig() is called via psig() after
	 * lwp_eosys is handled, so setting it here would affect the next
	 * system call.
	 */
	return (1);

badstack:
	no_fault();
	if (watched)
		watch_enable_addr((caddr_t)sp, minstacksz, S_WRITE);
	if (tuc)
		kmem_free(tuc, sizeof (*tuc));
#ifdef DEBUG
	printf("sendsig: bad signal stack cmd=%s, pid=%d, sig=%d\n",
	    PTOU(p)->u_comm, p->p_pid, sig);
	printf("on fault, sigsp = 0x%p, action = 0x%p, upc = 0x%lx\n",
	    (void *)sp, (void *)hdlr, (uintptr_t)upc);
#endif
	return (0);
}
static int
watch_copyinstr(
	const char *uaddr, char *kaddr, size_t maxlength, size_t *lencopied)
{
	klwp_t *lwp = ttolwp(curthread);
	size_t resid;
	int error = 0;
	label_t ljb;

	if ((resid = maxlength) == 0)
		return (ENAMETOOLONG);

	while (resid && error == 0) {
		int watchcode;
		caddr_t vaddr;
		size_t part;
		size_t len;
		size_t size;
		int ta;
		int mapped;

		if ((part = PAGESIZE -
		    (((uintptr_t)uaddr) & PAGEOFFSET)) > resid)
			part = resid;

		if (!pr_is_watchpage((caddr_t)uaddr, S_READ))
			watchcode = 0;
		else {
			vaddr = (caddr_t)uaddr;
			watchcode = pr_is_watchpoint(&vaddr, &ta,
			    part, &len, S_READ);
			if (watchcode) {
				if (ta == 0)
					part = vaddr - uaddr;
				else {
					len += vaddr - uaddr;
					if (part > len)
						part = len;
				}
			}
		}

		/*
		 * Copy the initial part, up to a watched address, if any.
		 */
		if (part != 0) {
			mapped = pr_mappage((caddr_t)uaddr, part, S_READ, 1);
			if (on_fault(&ljb))
				error = EFAULT;
			else
				error = copyinstr_noerr(uaddr, kaddr, part,
				    &size);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)uaddr, part, S_READ, 1);
			uaddr += size;
			kaddr += size;
			resid -= size;
			if (error == ENAMETOOLONG && resid > 0)
				error = 0;
			if (error != 0 || (watchcode &&
			    (uaddr < vaddr || kaddr[-1] == '\0')))
				break;	/* didn't reach the watched area */
		}

		/*
		 * If trapafter was specified, then copy through the
		 * watched area before taking the watchpoint trap.
		 */
		while (resid && watchcode && ta && len > part && error == 0 &&
		    size == part && kaddr[-1] != '\0') {
			len -= part;
			if ((part = PAGESIZE) > resid)
				part = resid;
			if (part > len)
				part = len;
			mapped = pr_mappage((caddr_t)uaddr, part, S_READ, 1);
			if (on_fault(&ljb))
				error = EFAULT;
			else
				error = copyinstr_noerr(uaddr, kaddr, part,
				    &size);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)uaddr, part, S_READ, 1);
			uaddr += size;
			kaddr += size;
			resid -= size;
			if (error == ENAMETOOLONG && resid > 0)
				error = 0;
		}

		/* if we hit a watched address, do the watchpoint logic */
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			error = EFAULT;
			break;
		}

		if (error == 0 && part != 0 &&
		    (size < part || kaddr[-1] == '\0'))
			break;
	}

	if (error != EFAULT && lencopied)
		*lencopied = maxlength - resid;
	return (error);
}
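/*
 * Added commentary (not in the original source): watch_copyinstr() above
 * and watch_xcopyout() below share the same three-step shape per page:
 * (1) copy up to the first watched address, if any; (2) if the watchpoint
 * is "trap-after" (ta != 0), keep copying through the watched range; and
 * (3) invoke sys_watchpoint() to deliver the watchpoint trap, failing with
 * EFAULT if the trap cannot be delivered or the lwp requested an abort.
 */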
static int
watch_xcopyout(const void *kaddr, void *uaddr, size_t count)
{
	klwp_t *lwp = ttolwp(curthread);
	caddr_t watch_uaddr = (caddr_t)uaddr;
	caddr_t watch_kaddr = (caddr_t)kaddr;
	int error = 0;
	label_t ljb;

	while (count && error == 0) {
		int watchcode;
		caddr_t vaddr;
		size_t part;
		size_t len;
		int ta;
		int mapped;

		/*
		 * Compute the page-bounded chunk from the advancing user
		 * address (watch_uaddr), not the original uaddr.
		 */
		if ((part = PAGESIZE -
		    (((uintptr_t)watch_uaddr) & PAGEOFFSET)) > count)
			part = count;

		if (!pr_is_watchpage(watch_uaddr, S_WRITE))
			watchcode = 0;
		else {
			vaddr = watch_uaddr;
			watchcode = pr_is_watchpoint(&vaddr, &ta,
			    part, &len, S_WRITE);
			if (watchcode) {
				if (ta == 0)
					part = vaddr - watch_uaddr;
				else {
					len += vaddr - watch_uaddr;
					if (part > len)
						part = len;
				}
			}
		}

		/*
		 * Copy the initial part, up to a watched address, if any.
		 */
		if (part != 0) {
			mapped = pr_mappage(watch_uaddr, part, S_WRITE, 1);
			if (on_fault(&ljb))
				error = EFAULT;
			else
				copyout_noerr(watch_kaddr, watch_uaddr, part);
			no_fault();
			if (mapped)
				pr_unmappage(watch_uaddr, part, S_WRITE, 1);
			watch_uaddr += part;
			watch_kaddr += part;
			count -= part;
		}

		/*
		 * If trapafter was specified, then copy through the
		 * watched area before taking the watchpoint trap.
		 */
		while (count && watchcode && ta && len > part && error == 0) {
			len -= part;
			if ((part = PAGESIZE) > count)
				part = count;
			if (part > len)
				part = len;
			mapped = pr_mappage(watch_uaddr, part, S_WRITE, 1);
			if (on_fault(&ljb))
				error = EFAULT;
			else
				copyout_noerr(watch_kaddr, watch_uaddr, part);
			no_fault();
			if (mapped)
				pr_unmappage(watch_uaddr, part, S_WRITE, 1);
			watch_uaddr += part;
			watch_kaddr += part;
			count -= part;
		}

		/* if we hit a watched address, do the watchpoint logic */
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			error = EFAULT;
			break;
		}
	}

	return (error);
}
static int
futex_wake_op_execute(int32_t *addr, int32_t val3)
{
	int32_t op = FUTEX_OP_OP(val3);
	int32_t cmp = FUTEX_OP_CMP(val3);
	int32_t cmparg = FUTEX_OP_CMPARG(val3);
	int32_t oparg, oldval, newval;
	label_t ljb;
	int rval;

	if ((uintptr_t)addr >= KERNELBASE)
		return (set_errno(EFAULT));

	if (on_fault(&ljb))
		return (set_errno(EFAULT));

	oparg = FUTEX_OP_OPARG(val3);

	do {
		oldval = *addr;
		newval = oparg;

		/*
		 * FUTEX_WAKE_OP semantics: store (oldval <op> oparg).
		 * newval starts out as oparg, so each arm combines it
		 * with oldval accordingly.
		 */
		switch (op) {
		case FUTEX_OP_SET:
			break;

		case FUTEX_OP_ADD:
			newval += oldval;		/* oldval + oparg */
			break;

		case FUTEX_OP_OR:
			newval |= oldval;		/* oldval | oparg */
			break;

		case FUTEX_OP_ANDN:
			newval = oldval & ~oparg;
			break;

		case FUTEX_OP_XOR:
			newval ^= oldval;		/* oldval ^ oparg */
			break;

		default:
			no_fault();
			return (set_errno(EINVAL));
		}
	} while (atomic_cas_32((uint32_t *)addr, oldval, newval) != oldval);

	no_fault();

	switch (cmp) {
	case FUTEX_OP_CMP_EQ:
		rval = (oldval == cmparg);
		break;

	case FUTEX_OP_CMP_NE:
		rval = (oldval != cmparg);
		break;

	case FUTEX_OP_CMP_LT:
		rval = (oldval < cmparg);
		break;

	case FUTEX_OP_CMP_LE:
		rval = (oldval <= cmparg);
		break;

	case FUTEX_OP_CMP_GT:
		rval = (oldval > cmparg);
		break;

	case FUTEX_OP_CMP_GE:
		rval = (oldval >= cmparg);
		break;

	default:
		return (set_errno(EINVAL));
	}

	return (rval);
}
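/*
 * For reference (added commentary, not in the original source): val3 above
 * packs the whole wake-op into a single word.  Assuming the Linux
 * FUTEX_WAKE_OP bit layout that the FUTEX_OP_* decode macros imply:
 *
 *	 31   28 27   24 23        12 11         0
 *	+-------+-------+------------+------------+
 *	|  op   |  cmp  |   oparg    |   cmparg   |
 *	+-------+-------+------------+------------+
 *
 * so, e.g., "add 1 to *addr, then report whether the old value was > 0"
 * would be encoded roughly as:
 *
 *	val3 = (FUTEX_OP_ADD << 28) | (FUTEX_OP_CMP_GT << 24) |
 *	    (1 << 12) | 0;
 */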
/*ARGSUSED3*/
static int
mmrw(dev_t dev, struct uio *uio, enum uio_rw rw, cred_t *cred)
{
	pfn_t v;
	struct iovec *iov;
	int error = 0;
	size_t c;
	ssize_t oresid = uio->uio_resid;
	minor_t minor = getminor(dev);

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor) {

		case M_MEM:
			memlist_read_lock();
			if (!address_in_memlist(phys_install,
			    (uint64_t)uio->uio_loffset, 1)) {
				memlist_read_unlock();
				error = EFAULT;
				break;
			}
			memlist_read_unlock();

			v = BTOP((u_offset_t)uio->uio_loffset);
			error = mmio(uio, rw, v,
			    uio->uio_loffset & PAGEOFFSET, 0, NULL);
			break;

		case M_KMEM:
		case M_ALLKMEM:
			{
			page_t **ppp = NULL;
			caddr_t vaddr = (caddr_t)uio->uio_offset;
			int try_lock = NEED_LOCK_KVADDR(vaddr);
			int locked = 0;

			if ((error = plat_mem_do_mmio(uio, rw)) != ENOTSUP)
				break;

			/*
			 * If vaddr does not map a valid page, as_pagelock()
			 * will return failure.  Hence we can't check the
			 * return value and return EFAULT here as we'd like.
			 * seg_kp and seg_kpm do not properly support
			 * as_pagelock() for this context so we avoid it
			 * using the try_lock set check above.  Some day when
			 * the kernel page locking gets redesigned all this
			 * muck can be cleaned up.
			 */
			if (try_lock)
				locked = (as_pagelock(&kas, &ppp, vaddr,
				    PAGESIZE, S_WRITE) == 0);

			v = hat_getpfnum(kas.a_hat,
			    (caddr_t)(uintptr_t)uio->uio_loffset);
			if (v == PFN_INVALID) {
				if (locked)
					as_pageunlock(&kas, ppp, vaddr,
					    PAGESIZE, S_WRITE);
				error = EFAULT;
				break;
			}

			error = mmio(uio, rw, v,
			    uio->uio_loffset & PAGEOFFSET,
			    minor == M_ALLKMEM || mm_kmem_io_access,
			    (locked && ppp) ? *ppp : NULL);
			if (locked)
				as_pageunlock(&kas, ppp, vaddr, PAGESIZE,
				    S_WRITE);
			}
			break;

		case M_ZERO:
			if (rw == UIO_READ) {
				label_t ljb;

				if (on_fault(&ljb)) {
					no_fault();
					error = EFAULT;
					break;
				}
				uzero(iov->iov_base, iov->iov_len);
				no_fault();
				uio->uio_resid -= iov->iov_len;
				uio->uio_loffset += iov->iov_len;
				break;
			}
			/* else it's a write, fall through to NULL case */
			/*FALLTHROUGH*/

		case M_NULL:
			if (rw == UIO_READ)
				return (0);
			c = iov->iov_len;
			iov->iov_base += c;
			iov->iov_len -= c;
			uio->uio_loffset += c;
			uio->uio_resid -= c;
			break;
		}
	}
	return (uio->uio_resid == oresid ? error : 0);
}