static int poll_one(struct file *file)
{
	int retval = 1;
	poll_table *table;
	struct poll_wqueues pwq;

	poll_initwait(&pwq);
	table = &pwq.pt;
	for (;;) {
		int mask;

		set_current_state(TASK_INTERRUPTIBLE);
		mask = file->f_op->poll(file, table);
		if (mask & POLLIN)
			break;
		/* wait queues need registering on the first pass only */
		table = NULL;
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	set_current_state(TASK_RUNNING);
	poll_freewait(&pwq);
	return retval;
}
static int poll_one(struct file *file)
{
	int retval = 1;
	poll_table *table;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,48)
	poll_table wait_table;

	poll_initwait(&wait_table);
	table = &wait_table;
#else
	struct poll_wqueues pwq;

	poll_initwait(&pwq);
	table = &pwq.pt;
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,5,48) */
	for (;;) {
		int mask;

		set_current_state(TASK_INTERRUPTIBLE);
		mask = file->f_op->poll(file, table);
		if (mask & POLLIN)
			break;
		table = NULL;
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	set_current_state(TASK_RUNNING);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,48)
	poll_freewait(&wait_table);
#else
	poll_freewait(&pwq);
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,5,48) */
	return retval;
}
int async_poll(struct kiocb *iocb, int events)
{
	unsigned int mask;
	async_poll_table *pasync;
	poll_table *p;

	/* Fast path */
	if (iocb->filp->f_op && iocb->filp->f_op->poll) {
		mask = iocb->filp->f_op->poll(iocb->filp, NULL);
		mask &= events | POLLERR | POLLHUP;
		if (mask & events)
			return mask;
	}

	pasync = kmem_cache_alloc(async_poll_table_cache, SLAB_KERNEL);
	if (!pasync)
		return -ENOMEM;

	p = (poll_table *)pasync;
	poll_initwait(p);
	wtd_set_action(&pasync->wtd, async_poll_complete, pasync);
	p->iocb = iocb;
	pasync->wake = 0;
	pasync->sync = 0;
	pasync->events = events;
	pasync->pt_page.entry = pasync->pt_page.entries;
	pasync->pt_page.size = sizeof(pasync->pt_page);
	p->table = &pasync->pt_page;

	iocb->data = p;
	iocb->users++;
	wmb();

	mask = DEFAULT_POLLMASK;
	if (iocb->filp->f_op && iocb->filp->f_op->poll)
		mask = iocb->filp->f_op->poll(iocb->filp, p);
	mask &= events | POLLERR | POLLHUP;
	if (mask && xchg(&iocb->data, NULL)) {
		poll_freewait(p);
		aio_complete(iocb, mask, 0);
	}

	iocb->cancel = async_poll_cancel;
	aio_put_req(iocb);
	return 0;
}
static inline int dpram_poll(struct file *filp)
{
	int ret;
	unsigned int mask;
	struct poll_wqueues wait_table;	/* was: poll_table wait_table; */
	mm_segment_t oldfs;

	DPRINTK(2, "BEGIN\n");

	poll_initwait(&wait_table);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		oldfs = get_fs();
		set_fs(get_ds());
		mask = filp->f_op->poll(filp, &wait_table.pt);
		set_fs(oldfs);

		if (mask & POLLIN) {
			/* got data */
			ret = 0;
			break;
		}
		if (wait_table.error) {
			DPRINTK(1, "error in f_op->poll()\n");
			ret = wait_table.error;
			break;
		}
		if (signal_pending(current)) {
			/* got signal */
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	set_current_state(TASK_RUNNING);
	poll_freewait(&wait_table);

	DPRINTK(2, "END\n");
	return ret;
}
static int poll_one(struct file *file, struct poll_wqueues *pwq)
{
	int retval = 1;
	poll_table *table;

	poll_initwait(pwq);
	table = &pwq->pt;
	for (;;) {
		int mask;

		mask = file->f_op->poll(file, table);
		if (mask & POLLIN)
			break;
		table = NULL;
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		poll_schedule(pwq, TASK_INTERRUPTIBLE);
	}
	poll_freewait(pwq);
	return retval;
}
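A minimal caller sketch for the poll_schedule() variant above. `wait_for_readable` is a hypothetical helper, not an existing kernel API; it assumes the caller already holds a reference on `file` and that `poll_one()` is compiled in as defined above.

/*
 * Sketch only: block until `file' reports POLLIN, so the caller can
 * then issue a read that should make progress without blocking.
 */
static int wait_for_readable(struct file *file)
{
	struct poll_wqueues pwq;	/* wait-queue bookkeeping on our stack */
	int ret;

	ret = poll_one(file, &pwq);	/* 1 on POLLIN, -ERESTARTSYS on signal */
	if (ret < 0)
		return ret;		/* interrupted: let the caller restart */

	return 0;			/* POLLIN set: data is ready */
}

Note that `pwq` must stay live for the whole call, since poll_freewait() inside poll_one() is what removes the registered wait-queue entries before the stack frame goes away.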
void poll_test(void)
{
	struct poll_wqueues test;

	poll_initwait(&test);
	/* no ->poll() call registers anything here, so freeing the
	 * wait table immediately is a no-op teardown */
	poll_freewait(&test);
}
int timod_getmsg(unsigned int fd, char *ctl_buf, int ctl_maxlen, s32 *ctl_len,
		 char *data_buf, int data_maxlen, s32 *data_len, int *flags_p)
{
	int error;
	int oldflags;
	struct file *filp;
	struct inode *ino;
	struct sol_socket_struct *sock;
	struct T_unitdata_ind udi;
	mm_segment_t old_fs = get_fs();
	long args[6];
	char *tmpbuf;
	int tmplen;
	int (*sys_socketcall)(int, unsigned long *) =
		(int (*)(int, unsigned long *))SYS(socketcall);
	int (*sys_recvfrom)(int, void *, size_t, unsigned, struct sockaddr *, int *);

	SOLD("entry");
	SOLDD(("%u %p %d %p %p %d %p %d\n", fd, ctl_buf, ctl_maxlen, ctl_len,
	       data_buf, data_maxlen, data_len, *flags_p));
	filp = current->files->fd[fd];
	ino = filp->f_dentry->d_inode;
	sock = (struct sol_socket_struct *)filp->private_data;
	SOLDD(("%p %p\n", sock->pfirst, sock->pfirst ? sock->pfirst->next : NULL));
	if (ctl_maxlen > 0 && !sock->pfirst &&
	    ino->u.socket_i.type == SOCK_STREAM && sock->state == TS_IDLE) {
		SOLD("calling LISTEN");
		args[0] = fd;
		args[1] = -1;
		set_fs(KERNEL_DS);
		sys_socketcall(SYS_LISTEN, args);
		set_fs(old_fs);
		SOLD("LISTEN done");
	}
	if (!(filp->f_flags & O_NONBLOCK)) {
		poll_table wait_table, *wait;

		poll_initwait(&wait_table);
		wait = &wait_table;
		for (;;) {
			SOLD("loop");
			set_current_state(TASK_INTERRUPTIBLE);
			/* ! ( l<0 || ( l>=0 && ( ! pfirst || (flags == HIPRI && pri != HIPRI) ) ) ) */
			/* ( ! l<0 && ! ( l>=0 && ( ! pfirst || (flags == HIPRI && pri != HIPRI) ) ) ) */
			/* ( l>=0 && ( ! l>=0 || ! ( ! pfirst || (flags == HIPRI && pri != HIPRI) ) ) ) */
			/* ( l>=0 && ( l<0 || ( pfirst && ! (flags == HIPRI && pri != HIPRI) ) ) ) */
			/* ( l>=0 && ( l<0 || ( pfirst && (flags != HIPRI || pri == HIPRI) ) ) ) */
			/* ( l>=0 && ( pfirst && (flags != HIPRI || pri == HIPRI) ) ) */
			if (ctl_maxlen >= 0 && sock->pfirst &&
			    (*flags_p != MSG_HIPRI ||
			     sock->pfirst->pri == MSG_HIPRI))
				break;
			SOLD("cond 1 passed");
			if (
#if 1
			    *flags_p != MSG_HIPRI &&
#endif
			    ((filp->f_op->poll(filp, wait) & POLLIN) ||
			     (filp->f_op->poll(filp, NULL) & POLLIN) ||
			     signal_pending(current))) {
				break;
			}
			if (*flags_p == MSG_HIPRI) {
				SOLD("avoiding lockup");
				break;
			}
			if (wait_table.error) {
				SOLD("wait-table error");
				poll_freewait(&wait_table);
				return wait_table.error;
			}
			SOLD("scheduling");
			schedule();
		}
		SOLD("loop done");
		current->state = TASK_RUNNING;
		poll_freewait(&wait_table);
		if (signal_pending(current)) {
			SOLD("signal pending");
			return -EINTR;
		}
	}
	if (ctl_maxlen >= 0 && sock->pfirst) {
		struct T_primsg *it = sock->pfirst;
		int l = min_t(int, ctl_maxlen, it->length);

		SCHECK_MAGIC((char *)((u64)(((char *)&it->type) + sock->offset +
					    it->length + 7) & ~7), MKCTL_MAGIC);
		SOLD("purting ctl data");
		if (copy_to_user(ctl_buf, (char *)&it->type + sock->offset, l))
			return -EFAULT;
		SOLD("pur it");
		if (put_user(l, ctl_len))
			return -EFAULT;
		SOLD("set ctl_len");
		*flags_p = it->pri;
		it->length -= l;
		if (it->length) {
			SOLD("more ctl");
			sock->offset += l;
			return MORECTL;
		} else {
			SOLD("removing message");
			sock->pfirst = it->next;
			if (!sock->pfirst)
				sock->plast = NULL;
			SOLDD(("getmsg kfree %016lx->%016lx\n", it, sock->pfirst));
			mykfree(it);
			sock->offset = 0;
			SOLD("ctl done");
			return 0;
		}
	}
	*flags_p = 0;
	if (ctl_maxlen >= 0) {
		SOLD("ACCEPT perhaps?");
		if (ino->u.socket_i.type == SOCK_STREAM && sock->state == TS_IDLE) {
			struct T_conn_ind ind;
			char *buf = getpage();
			int len = BUF_SIZE;

			SOLD("trying ACCEPT");
			if (put_user(ctl_maxlen - sizeof(ind), ctl_len))
				return -EFAULT;
			args[0] = fd;
			args[1] = (long)buf;
			args[2] = (long)&len;
			oldflags = filp->f_flags;
			filp->f_flags |= O_NONBLOCK;
			SOLD("calling ACCEPT");
			set_fs(KERNEL_DS);
			error = sys_socketcall(SYS_ACCEPT, args);
			set_fs(old_fs);
			filp->f_flags = oldflags;
			if (error < 0) {
				SOLD("some error");
				putpage(buf);
				return error;
			}
			if (error) {
				SOLD("connect");
				putpage(buf);
				if (sizeof(ind) > ctl_maxlen) {
					SOLD("generating CONN_IND");
					ind.PRIM_type = T_CONN_IND;
					ind.SRC_length = len;
					ind.SRC_offset = sizeof(ind);
					ind.OPT_length = ind.OPT_offset = 0;
					ind.SEQ_number = error;
					if (copy_to_user(ctl_buf, &ind, sizeof(ind)) ||
					    put_user(sizeof(ind) + ind.SRC_length, ctl_len))
						return -EFAULT;
					SOLD("CONN_IND created");
				}
				if (data_maxlen >= 0)
					put_user(0, data_len);
				SOLD("CONN_IND done");
				return 0;
			}
			if (len > ctl_maxlen) {
				SOLD("data don't fit");
				putpage(buf);
				return -EFAULT;	/* XXX - is this ok ? */
			}
			if (copy_to_user(ctl_buf, buf, len) ||
			    put_user(len, ctl_len)) {
				SOLD("can't copy data");
				putpage(buf);
				return -EFAULT;
			}
			SOLD("ACCEPT done");
			putpage(buf);
		}
	}
	SOLD("checking data req");
	if (data_maxlen <= 0) {
		if (data_maxlen == 0)
			put_user(0, data_len);
		if (ctl_maxlen >= 0)
			put_user(0, ctl_len);
		return -EAGAIN;
	}
	SOLD("wants data");
	if (ctl_maxlen > sizeof(udi) && sock->state == TS_IDLE) {
		SOLD("udi fits");
		tmpbuf = ctl_buf + sizeof(udi);
		tmplen = ctl_maxlen - sizeof(udi);
	} else {
		SOLD("udi does not fit");
		tmpbuf = NULL;
		tmplen = 0;
	}
	if (put_user(tmplen, ctl_len))
		return -EFAULT;
	SOLD("set ctl_len");
	oldflags = filp->f_flags;
	filp->f_flags |= O_NONBLOCK;
	SOLD("calling recvfrom");
	sys_recvfrom = (int (*)(int, void *, size_t, unsigned,
				struct sockaddr *, int *))SYS(recvfrom);
	error = sys_recvfrom(fd, data_buf, data_maxlen, 0,
			     (struct sockaddr *)tmpbuf, ctl_len);
	filp->f_flags = oldflags;
	if (error < 0)
		return error;
	SOLD("error >= 0");
	if (error && ctl_maxlen > sizeof(udi) && sock->state == TS_IDLE) {
		SOLD("generating udi");
		udi.PRIM_type = T_UNITDATA_IND;
		get_user(udi.SRC_length, ctl_len);
		udi.SRC_offset = sizeof(udi);
		udi.OPT_length = udi.OPT_offset = 0;
		copy_to_user(ctl_buf, &udi, sizeof(udi));
		put_user(sizeof(udi) + udi.SRC_length, ctl_len);
		SOLD("udi done");
	} else
		put_user(0, ctl_len);
	put_user(error, data_len);
	SOLD("done");
	return 0;
}
static int do_ncp_rpc_call(struct ncp_server *server, int size,
			   struct ncp_reply_header *reply_buf, int max_reply_size)
{
	struct file *file;
	struct socket *sock;
	int result;
	char *start = server->packet;
	poll_table wait_table;
	int init_timeout, max_timeout;
	int timeout;
	int retrans;
	int major_timeout_seen;
	int acknowledge_seen;
	int n;

	/* We have to check the result, so store the complete header */
	struct ncp_request_header request =
		*((struct ncp_request_header *)(server->packet));
	struct ncp_reply_header reply;

	file = server->ncp_filp;
	sock = &file->f_dentry->d_inode->u.socket_i;

	init_timeout = server->m.time_out;
	max_timeout = NCP_MAX_RPC_TIMEOUT;
	retrans = server->m.retry_count;
	major_timeout_seen = 0;
	acknowledge_seen = 0;

	for (n = 0, timeout = init_timeout;; n++, timeout <<= 1) {
		/*
		DDPRINTK("ncpfs: %08lX:%02X%02X%02X%02X%02X%02X:%04X\n",
			 htonl(server->m.serv_addr.sipx_network),
			 server->m.serv_addr.sipx_node[0],
			 server->m.serv_addr.sipx_node[1],
			 server->m.serv_addr.sipx_node[2],
			 server->m.serv_addr.sipx_node[3],
			 server->m.serv_addr.sipx_node[4],
			 server->m.serv_addr.sipx_node[5],
			 ntohs(server->m.serv_addr.sipx_port));
		*/
		DDPRINTK("ncpfs: req.typ: %04X, con: %d, "
			 "seq: %d", request.type,
			 (request.conn_high << 8) + request.conn_low,
			 request.sequence);
		DDPRINTK(" func: %d\n", request.function);

		result = _send(sock, (void *)start, size);
		if (result < 0) {
			printk(KERN_ERR "ncp_rpc_call: send error = %d\n", result);
			break;
		}
re_select:
		poll_initwait(&wait_table);
		/* mb() is not necessary because ->poll() will serialize
		   instructions adding the wait_table waitqueues in the
		   waitqueue-head before going to calculate the mask-retval. */
		__set_current_state(TASK_INTERRUPTIBLE);

		if (!(sock->ops->poll(file, sock, &wait_table) & POLLIN)) {
			int timed_out;

			if (timeout > max_timeout) {
				/* JEJB/JSP 2/7/94
				 * This is useful to see if the system is
				 * hanging */
				if (acknowledge_seen == 0) {
					printk(KERN_WARNING "NCP max timeout\n");
				}
				timeout = max_timeout;
			}
			timed_out = !schedule_timeout(timeout);
			poll_freewait(&wait_table);
			current->state = TASK_RUNNING;
			if (signal_pending(current)) {
				result = -ERESTARTSYS;
				break;
			}
			if (wait_table.error) {
				result = wait_table.error;
				break;
			}
			if (timed_out) {
				if (n < retrans)
					continue;
				if (server->m.flags & NCP_MOUNT_SOFT) {
					printk(KERN_WARNING "NCP server not responding\n");
					result = -EIO;
					break;
				}
				n = 0;
				timeout = init_timeout;
				if (init_timeout < max_timeout)
					init_timeout <<= 1;
				if (!major_timeout_seen) {
					printk(KERN_WARNING "NCP server not responding\n");
				}
				major_timeout_seen = 1;
				continue;
			}
		} else {
			poll_freewait(&wait_table);
		}
		current->state = TASK_RUNNING;

		/* Get the header from the next packet using a peek, so keep it
		 * on the recv queue.  If it is wrong, it will be some reply
		 * we don't now need, so discard it */
		result = _recv(sock, (void *)&reply, sizeof(reply),
			       MSG_PEEK | MSG_DONTWAIT);
		if (result < 0) {
			if (result == -EAGAIN) {
				DDPRINTK("ncp_rpc_call: bad select ready\n");
				goto re_select;
			}
			if (result == -ECONNREFUSED) {
				DPRINTK("ncp_rpc_call: server playing coy\n");
				goto re_select;
			}
			if (result != -ERESTARTSYS) {
				printk(KERN_ERR "ncp_rpc_call: recv error = %d\n",
				       -result);
			}
			break;
		}
		if ((result == sizeof(reply)) &&
		    (reply.type == NCP_POSITIVE_ACK)) {
			/* Throw away the packet */
			DPRINTK("ncp_rpc_call: got positive acknowledge\n");
			_recv(sock, (void *)&reply, sizeof(reply), MSG_DONTWAIT);
			n = 0;
			timeout = max_timeout;
			acknowledge_seen = 1;
			goto re_select;
		}
		DDPRINTK("ncpfs: rep.typ: %04X, con: %d, tsk: %d,"
			 "seq: %d\n", reply.type,
			 (reply.conn_high << 8) + reply.conn_low,
			 reply.task, reply.sequence);
		if ((result >= sizeof(reply)) && (reply.type == NCP_REPLY) &&
		    ((request.type == NCP_ALLOC_SLOT_REQUEST) ||
		     ((reply.sequence == request.sequence) &&
		      (reply.conn_low == request.conn_low)
		      /* seem to get wrong task from NW311
		      && (reply.task == request.task) */
		      && (reply.conn_high == request.conn_high)))) {
			if (major_timeout_seen)
				printk(KERN_NOTICE "NCP server OK\n");
			break;
		}
		/* JEJB/JSP 2/7/94
		 * we have xid mismatch, so discard the packet and start
		 * again.  What a hack! but I can't call recvfrom with
		 * a null buffer yet. */
		_recv(sock, (void *)&reply, sizeof(reply), MSG_DONTWAIT);
		DPRINTK("ncp_rpc_call: reply mismatch\n");
		goto re_select;
	}
	/*
	 * we have the correct reply, so read into the correct place and
	 * return it
	 */
	result = _recv(sock, (void *)reply_buf, max_reply_size, MSG_DONTWAIT);
	if (result < 0) {
		printk(KERN_WARNING "NCP: notice message: result=%d\n", result);
	} else if (result < sizeof(struct ncp_reply_header)) {
		printk(KERN_ERR "NCP: just caught a too small read memory size..., "
		       "email to NET channel\n");
		printk(KERN_ERR "NCP: result=%d\n", result);
		result = -EIO;
	}
	return result;
}
static int do_tcp_rcv(struct ncp_server *server, void *buffer, size_t len)
{
	poll_table wait_table;
	struct file *file;
	struct socket *sock;
	int init_timeout;
	size_t dataread;
	int result = 0;

	file = server->ncp_filp;
	sock = &file->f_dentry->d_inode->u.socket_i;
	dataread = 0;

	init_timeout = server->m.time_out * 20;

	/* hard-mounted volumes have no timeout, except connection close... */
	if (!(server->m.flags & NCP_MOUNT_SOFT))
		init_timeout = 0x7FFF0000;

	while (len) {
		poll_initwait(&wait_table);
		/* mb() is not necessary because ->poll() will serialize
		   instructions adding the wait_table waitqueues in the
		   waitqueue-head before going to calculate the mask-retval. */
		__set_current_state(TASK_INTERRUPTIBLE);
		if (!(sock->ops->poll(file, sock, &wait_table) & POLLIN)) {
			init_timeout = schedule_timeout(init_timeout);
			poll_freewait(&wait_table);
			current->state = TASK_RUNNING;
			if (signal_pending(current)) {
				return -ERESTARTSYS;
			}
			if (!init_timeout) {
				return -EIO;
			}
			if (wait_table.error) {
				return wait_table.error;
			}
		} else {
			poll_freewait(&wait_table);
		}
		current->state = TASK_RUNNING;

		result = _recv(sock, buffer, len, MSG_DONTWAIT);
		if (result < 0) {
			if (result == -EAGAIN) {
				DDPRINTK("ncpfs: tcp: bad select ready\n");
				continue;
			}
			return result;
		}
		if (result == 0) {
			printk(KERN_ERR "ncpfs: tcp: EOF on socket\n");
			return -EIO;
		}
		if (result > len) {
			printk(KERN_ERR "ncpfs: tcp: bug in recvmsg\n");
			return -EIO;
		}
		dataread += result;
		buffer += result;
		len -= result;
	}
	return 0;
}
asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds,
			 long timeout)
{
	struct poll_wqueues table;
	int fdcount, err;
	unsigned int i;
	struct poll_list *head;
	struct poll_list *walk;

	/* Do a sanity check on nfds ... */
	if (nfds > current->files->max_fdset && nfds > OPEN_MAX)
		return -EINVAL;

	if (timeout) {
		/* Careful about overflow in the intermediate values */
		if ((unsigned long)timeout < MAX_SCHEDULE_TIMEOUT / HZ)
			timeout = (unsigned long)(timeout * HZ + 999) / 1000 + 1;
		else /* Negative or overflow */
			timeout = MAX_SCHEDULE_TIMEOUT;
	}

	poll_initwait(&table);

	head = NULL;
	walk = NULL;
	i = nfds;
	err = -ENOMEM;
	while (i != 0) {
		struct poll_list *pp;

		pp = kmalloc(sizeof(struct poll_list) +
			     sizeof(struct pollfd) *
			     (i > POLLFD_PER_PAGE ? POLLFD_PER_PAGE : i),
			     GFP_KERNEL);
		if (pp == NULL)
			goto out_fds;
		pp->next = NULL;
		pp->len = (i > POLLFD_PER_PAGE ? POLLFD_PER_PAGE : i);
		if (head == NULL)
			head = pp;
		else
			walk->next = pp;

		walk = pp;
		if (copy_from_user(pp->entries, ufds + nfds - i,
				   sizeof(struct pollfd) * pp->len)) {
			err = -EFAULT;
			goto out_fds;
		}
		i -= pp->len;
	}
	fdcount = do_poll(nfds, head, &table, timeout);

	/* OK, now copy the revents fields back to user space. */
	walk = head;
	err = -EFAULT;
	while (walk != NULL) {
		struct pollfd *fds = walk->entries;
		int j;

		for (j = 0; j < walk->len; j++, ufds++) {
			if (__put_user(fds[j].revents, &ufds->revents))
				goto out_fds;
		}
		walk = walk->next;
	}
	err = fdcount;
	if (!fdcount && signal_pending(current))
		err = -EINTR;
out_fds:
	walk = head;
	while (walk != NULL) {
		struct poll_list *pp = walk->next;

		kfree(walk);
		walk = pp;
	}
	poll_freewait(&table);
	return err;
}
int do_select(int n, fd_set_bits *fds, long *timeout)
{
	struct poll_wqueues table;
	poll_table *wait;
	int retval, i;
	long __timeout = *timeout;

	spin_lock(&current->files->file_lock);
	retval = max_select_fd(n, fds);
	spin_unlock(&current->files->file_lock);

	if (retval < 0)
		return retval;
	n = retval;

	poll_initwait(&table);
	wait = &table.pt;
	if (!__timeout)
		wait = NULL;
	retval = 0;
	for (;;) {
		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;

		set_current_state(TASK_INTERRUPTIBLE);

		inp = fds->in; outp = fds->out; exp = fds->ex;
		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
			unsigned long in, out, ex, all_bits, bit = 1, mask, j;
			unsigned long res_in = 0, res_out = 0, res_ex = 0;
			struct file_operations *f_op = NULL;
			struct file *file = NULL;

			in = *inp++; out = *outp++; ex = *exp++;
			all_bits = in | out | ex;
			if (all_bits == 0) {
				i += __NFDBITS;
				continue;
			}

			for (j = 0; j < __NFDBITS; ++j, ++i, bit <<= 1) {
				if (i >= n)
					break;
				if (!(bit & all_bits))
					continue;
				file = fget(i);
				if (file) {
					f_op = file->f_op;
					mask = DEFAULT_POLLMASK;
					if (f_op && f_op->poll)
						mask = (*f_op->poll)(file, retval ? NULL : wait);
					fput(file);
					if ((mask & POLLIN_SET) && (in & bit)) {
						res_in |= bit;
						retval++;
					}
					if ((mask & POLLOUT_SET) && (out & bit)) {
						res_out |= bit;
						retval++;
					}
					if ((mask & POLLEX_SET) && (ex & bit)) {
						res_ex |= bit;
						retval++;
					}
				}
				cond_resched();
			}
			if (res_in)
				*rinp = res_in;
			if (res_out)
				*routp = res_out;
			if (res_ex)
				*rexp = res_ex;
		}
		wait = NULL;
		if (retval || !__timeout || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}
		__timeout = schedule_timeout(__timeout);
	}
	__set_current_state(TASK_RUNNING);

	poll_freewait(&table);

	/*
	 * Update the caller's timeout.
	 */
	*timeout = __timeout;
	return retval;
}
asmlinkage long sys_poll(struct pollfd *ufds, unsigned int nfds,
			 int timeout_msecs)
{
	int i, j, fdcount, err;
	struct pollfd **fds;
	poll_table table, *wait;
	int nchunks, nleft;
	long timeout;
	int64_t lltimeout;

	/* Do a sanity check on nfds ... */
	if (nfds > current->rlim[RLIMIT_NOFILE].rlim_cur)
		return -EINVAL;

	if (timeout_msecs) {
		if (timeout_msecs < 0)
			timeout = MAX_SCHEDULE_TIMEOUT;
		else {
			lltimeout = (int64_t)timeout_msecs * HZ + 999;
			do_div(lltimeout, 1000);
			lltimeout++;
			if (lltimeout > MAX_SCHEDULE_TIMEOUT)
				timeout = MAX_SCHEDULE_TIMEOUT;
			else
				timeout = (long)lltimeout;
		}
	} else
		timeout = 0;

	poll_initwait(&table);
	wait = &table;
	if (!timeout)
		wait = NULL;

	err = -ENOMEM;
	fds = NULL;
	if (nfds != 0) {
		fds = (struct pollfd **)kmalloc(
			(1 + (nfds - 1) / POLLFD_PER_PAGE) * sizeof(struct pollfd *),
			GFP_KERNEL);
		if (fds == NULL)
			goto out;
	}

	nchunks = 0;
	nleft = nfds;
	while (nleft > POLLFD_PER_PAGE) { /* allocate complete PAGE_SIZE chunks */
		fds[nchunks] = (struct pollfd *)__get_free_page(GFP_KERNEL);
		if (fds[nchunks] == NULL)
			goto out_fds;
		nchunks++;
		nleft -= POLLFD_PER_PAGE;
	}
	if (nleft) { /* allocate last PAGE_SIZE chunk, only nleft elements used */
		fds[nchunks] = (struct pollfd *)__get_free_page(GFP_KERNEL);
		if (fds[nchunks] == NULL)
			goto out_fds;
	}

	err = -EFAULT;
	for (i = 0; i < nchunks; i++)
		if (copy_from_user(fds[i], ufds + i * POLLFD_PER_PAGE, PAGE_SIZE))
			goto out_fds1;
	if (nleft) {
		if (copy_from_user(fds[nchunks], ufds + nchunks * POLLFD_PER_PAGE,
				   nleft * sizeof(struct pollfd)))
			goto out_fds1;
	}

	fdcount = do_poll(nfds, nchunks, nleft, fds, wait, timeout);

	/* OK, now copy the revents fields back to user space. */
	for (i = 0; i < nchunks; i++)
		for (j = 0; j < POLLFD_PER_PAGE; j++, ufds++)
			__put_user((fds[i] + j)->revents, &ufds->revents);
	if (nleft)
		for (j = 0; j < nleft; j++, ufds++)
			__put_user((fds[nchunks] + j)->revents, &ufds->revents);

	err = fdcount;
	if (!fdcount && signal_pending(current))
		err = -EINTR;

out_fds1:
	if (nleft)
		free_page((unsigned long)(fds[nchunks]));
out_fds:
	for (i = 0; i < nchunks; i++)
		free_page((unsigned long)(fds[i]));
	if (nfds != 0)
		kfree(fds);
out:
	poll_freewait(&table);
	return err;
}
int do_select(int n, fd_set_bits *fds, long *timeout)
{
	poll_table table, *wait;
	int retval, i, off;
	long __timeout = *timeout;

	read_lock(&current->files->file_lock);
	retval = max_select_fd(n, fds);
	read_unlock(&current->files->file_lock);

	if (retval < 0)
		return retval;
	n = retval;

	poll_initwait(&table);
	wait = &table;
	if (!__timeout)
		wait = NULL;
	retval = 0;
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		for (i = 0; i < n; i++) {
			unsigned long bit = BIT(i);
			unsigned long mask;
			struct file *file;

			off = i / __NFDBITS;
			if (!(bit & BITS(fds, off)))
				continue;
			file = fget(i);
			mask = POLLNVAL;
			if (file) {
				mask = DEFAULT_POLLMASK;
				if (file->f_op && file->f_op->poll)
					mask = file->f_op->poll(file, wait);
				fput(file);
			}
			if ((mask & POLLIN_SET) && ISSET(bit, __IN(fds, off))) {
				SET(bit, __RES_IN(fds, off));
				retval++;
				wait = NULL;
			}
			if ((mask & POLLOUT_SET) && ISSET(bit, __OUT(fds, off))) {
				SET(bit, __RES_OUT(fds, off));
				retval++;
				wait = NULL;
			}
			if ((mask & POLLEX_SET) && ISSET(bit, __EX(fds, off))) {
				SET(bit, __RES_EX(fds, off));
				retval++;
				wait = NULL;
			}
		}
		wait = NULL;
		if (retval || !__timeout || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}
		__timeout = schedule_timeout(__timeout);
	}
	current->state = TASK_RUNNING;

	poll_freewait(&table);

	/*
	 * Update the caller's timeout.
	 */
	*timeout = __timeout;
	return retval;
}
asmlinkage long sys_poll(struct pollfd *ufds, unsigned int nfds, long timeout)
{
	int i, j, fdcount, err;
	struct pollfd **fds;
	poll_table table, *wait;
	int nchunks, nleft;

	/* Do a sanity check on nfds ... */
	if (nfds > current->files->max_fdset && nfds > OPEN_MAX)
		return -EINVAL;

	if (timeout) {
		/* Careful about overflow in the intermediate values */
		if ((unsigned long)timeout < MAX_SCHEDULE_TIMEOUT / HZ)
			timeout = (unsigned long)(timeout * HZ + 999) / 1000 + 1;
		else /* Negative or overflow */
			timeout = MAX_SCHEDULE_TIMEOUT;
	}

	poll_initwait(&table);
	wait = &table;
	if (!timeout)
		wait = NULL;

	err = -ENOMEM;
	fds = NULL;
	if (nfds != 0) {
		fds = (struct pollfd **)kmalloc(
			(1 + (nfds - 1) / POLLFD_PER_PAGE) * sizeof(struct pollfd *),
			GFP_KERNEL);
		if (fds == NULL)
			goto out;
	}

	nchunks = 0;
	nleft = nfds;
	while (nleft > POLLFD_PER_PAGE) { /* allocate complete PAGE_SIZE chunks */
		fds[nchunks] = (struct pollfd *)__get_free_page(GFP_KERNEL);
		if (fds[nchunks] == NULL)
			goto out_fds;
		nchunks++;
		nleft -= POLLFD_PER_PAGE;
	}
	if (nleft) { /* allocate last PAGE_SIZE chunk, only nleft elements used */
		fds[nchunks] = (struct pollfd *)__get_free_page(GFP_KERNEL);
		if (fds[nchunks] == NULL)
			goto out_fds;
	}

	err = -EFAULT;
	for (i = 0; i < nchunks; i++)
		if (copy_from_user(fds[i], ufds + i * POLLFD_PER_PAGE, PAGE_SIZE))
			goto out_fds1;
	if (nleft) {
		if (copy_from_user(fds[nchunks], ufds + nchunks * POLLFD_PER_PAGE,
				   nleft * sizeof(struct pollfd)))
			goto out_fds1;
	}

	fdcount = do_poll(nfds, nchunks, nleft, fds, wait, timeout);

	/* OK, now copy the revents fields back to user space. */
	for (i = 0; i < nchunks; i++)
		for (j = 0; j < POLLFD_PER_PAGE; j++, ufds++)
			__put_user((fds[i] + j)->revents, &ufds->revents);
	if (nleft)
		for (j = 0; j < nleft; j++, ufds++)
			__put_user((fds[nchunks] + j)->revents, &ufds->revents);

	err = fdcount;
	if (!fdcount && signal_pending(current))
		err = -EINTR;

out_fds1:
	if (nleft)
		free_page((unsigned long)(fds[nchunks]));
out_fds:
	for (i = 0; i < nchunks; i++)
		free_page((unsigned long)(fds[i]));
	if (nfds != 0)
		kfree(fds);
out:
	poll_freewait(&table);
	return err;
}