/* * In case for some reason the CMOS clock has not already been running * in UTC, but in some local time: The first time we set the timezone, * we will warp the clock so that it is ticking UTC time instead of * local time. Presumably, if someone is setting the timezone then we * are running in an environment where the programs understand about * timezones. This should be done at boot time in the /etc/rc script, * as soon as possible, so that the clock can be set right. Otherwise, * various programs will get confused when the clock gets warped. */ asmlinkage int sys_settimeofday(struct timeval *tv, struct timezone *tz) { static int firsttime = 1; struct timeval new_tv; struct timezone new_tz; if (!suser()) return -EPERM; if (tv) { int error = verify_area(VERIFY_READ, tv, sizeof(*tv)); if (error) return error; memcpy_fromfs(&new_tv, tv, sizeof(*tv)); } if (tz) { int error = verify_area(VERIFY_READ, tz, sizeof(*tz)); if (error) return error; memcpy_fromfs(&new_tz, tz, sizeof(*tz)); } if (tz) { sys_tz = new_tz; if (firsttime) { firsttime = 0; if (!tv) warp_clock(); } } if (tv) do_settimeofday(&new_tv); return 0; }
/*
 * Write handler for the patch manager device.
 *
 * Two message formats are accepted:
 *  - a SEQ_FULLSIZE patch header, which is forwarded to the synth
 *    driver's load_patch hook, and
 *  - a complete struct patmgr_info, treated as the patch manager's
 *    reply for a client currently sleeping on this device's mailbox.
 *
 * Returns the number of bytes consumed, or a negative errno.
 */
int pmgr_write (int dev, struct fileinfo *file, const char *buf, int count)
{
  unsigned long   flags;

  /* Every message starts with a 4-byte header. */
  if (count < 4)
    {
      printk ("PATMGR%d: Write count < 4\n", dev);
      return -(EIO);
    }

  /* Copy just the header into the per-device mailbox for inspection. */
  memcpy_fromfs ((char *) mbox[dev], &(buf)[0], 4);

  if (*(unsigned char *) mbox[dev] == SEQ_FULLSIZE)
    {
      int tmp_dev;

      /* The target device number is encoded in the header; it must
       * match the device this write arrived on. */
      tmp_dev = ((unsigned short *) mbox[dev])[2];
      if (tmp_dev != dev)
	return -(ENXIO);

      /* Hand the whole user buffer to the synth driver; offset 4
       * skips the header we already consumed. */
      return synth_devs[dev]->load_patch (dev, *(unsigned short *) mbox[dev], buf, 4, count, 1);
    }

  /* Anything else must be exactly one patmgr_info record. */
  if (count != sizeof (struct patmgr_info))
    {
      printk ("PATMGR%d: Invalid write count\n", dev);
      return -(EIO);
    }

  /*
   * If everything went OK, there should be a preallocated buffer in the
   * mailbox and a client waiting.
   */
  save_flags (flags);
  cli ();

  /* Deliver only when a mailbox buffer exists and no reply is
   * already in flight (msg_direction still clear). */
  if (mbox[dev] && !msg_direction[dev])
    {
      /* Append the payload after the 4 header bytes already copied. */
      memcpy_fromfs (&((char *) mbox[dev])[4], &(buf)[4], count - 4);
      msg_direction[dev] = S_TO_A;

      /* Wake the application sleeping on this mailbox, if any. */
      if ((appl_wait_flag.flags & WK_SLEEP))
	{
	  {
	    appl_wait_flag.flags = WK_WAKEUP;
	    module_wake_up (&appl_proc);
	  };
	}
    }

  restore_flags (flags);
  return count;
}
/* this will do terrible things if len + ipheader + devheader > dev->mtu */
/*
 * Send a raw packet (SOCK_PACKET) directly to the device named in the
 * destination address.  The payload is written verbatim after the
 * sk_buff header; no protocol headers are added here.
 */
static int packet_sendto (volatile struct sock *sk, unsigned char *from, int len, int noblock, unsigned flags, struct sockaddr_in *usin, int addr_len)
{
  struct sk_buff *skb;
  struct device *dev;
  struct sockaddr saddr;

  /* check the flags. */
  if (flags)
    return (-EINVAL);
  if (len < 0)
    return (-EINVAL);

  /* get and verify the address: a destination is mandatory, since it
   * names the output device. */
  if (usin)
    {
      if (addr_len < sizeof (saddr))
	return (-EINVAL);
      /* verify_area (VERIFY_WRITE, usin, sizeof (saddr));*/
      memcpy_fromfs (&saddr, usin, sizeof(saddr));
    }
  else
    return (-EINVAL);

  /* Allocate room for the sk_buff header plus the raw payload. */
  skb = sk->prot->wmalloc (sk, len+sizeof (*skb), 0, GFP_KERNEL);

  /* this shouldn't happen, but it could. */
  if (skb == NULL)
    {
      PRINTK (("packet_sendto: write buffer full?\n"));
      return (-EAGAIN);
    }
  skb->lock = 0;
  skb->mem_addr = skb;
  skb->mem_len = len + sizeof (*skb);
  skb->sk = sk;
  skb->free = 1;			/* free the skb after transmit */

  /* Force NUL-termination of the device name before the lookup. */
  saddr.sa_data[13] = 0;
  dev = get_dev (saddr.sa_data);
  if (dev == NULL)
    {
      sk->prot->wfree (sk, skb->mem_addr, skb->mem_len);
      return (-ENXIO);
    }

  /* verify_area (VERIFY_WRITE, from, len);*/
  /* Payload lives immediately after the sk_buff header (skb+1). */
  memcpy_fromfs (skb+1, from, len);
  skb->len = len;
  skb->next = NULL;

  /* Queue for transmission only if the interface is up; otherwise
   * drop the buffer. */
  if (dev->up)
    dev->queue_xmit (skb, dev, sk->priority);
  else
    kfree_skb (skb, FREE_WRITE);
  return (len);
}
/*
 * Bind an INET socket to a local address and port (bind(2)).
 * Validates the requested port, enforces the privileged-port rule,
 * checks for address/port conflicts, and rehashes the sock into the
 * protocol's sock_array slot for the new port.
 */
static int ip_proto_bind (struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
  struct sockaddr_in addr;
  volatile struct sock *sk, *sk2;
  unsigned short snum;

  sk = sock->data;
  if (sk == NULL)
    {
      printk ("Warning: sock->data = NULL: %d\n" ,__LINE__);
      return (0);
    }

  /* check this error. */
  /* Rebinding is only allowed while the socket is fully closed. */
  if (sk->state != TCP_CLOSE)
    return (-EIO);

  /* NOTE(review): two-argument verify_area() — presumably the old
   * pre-VERIFY_READ/VERIFY_WRITE interface; confirm against the
   * headers in this tree. */
  verify_area (uaddr, addr_len);
  memcpy_fromfs (&addr, uaddr, min (sizeof (addr), addr_len));

  if (addr.sin_family && addr.sin_family != AF_INET)
    return (-EIO);

  /* this needs to be changed. */
  snum = net16(addr.sin_port);	/* port arrives in network byte order */

  PRINTK ("bind sk =%X to port = %d\n", sk, snum);
  print_sk (sk);
  sk = sock->data;

  /* we can't just leave the socket bound wherever it is, it might be
     bound to a priveledged port. However, since there seems to be a
     bug here, we will leave it if the port is not priveledged(sp?) */
  if (snum == 0)
    {
      /* Port 0: keep an existing non-privileged binding, otherwise
       * pick a fresh ephemeral port. */
      if ( sk->num > PROT_SOCK)
	return (0);
      snum = get_new_socknum (sk->prot, 0);
    }

  /* Privileged (low-numbered) ports require superuser. */
  if (snum <= PROT_SOCK && !suser())
    return (-EPERM);

  /* Only adopt the address if it is one of ours (or INADDR_ANY). */
  if (my_ip_addr(addr.sin_addr.s_addr) || addr.sin_addr.s_addr == 0)
    sk->saddr = addr.sin_addr.s_addr;

  PRINTK ("sock_array[%d] = %X:\n", snum & (SOCK_ARRAY_SIZE -1), sk->prot->sock_array[snum & (SOCK_ARRAY_SIZE -1)]);
  print_sk (sk->prot->sock_array[snum & (SOCK_ARRAY_SIZE -1)]);

  /* make sure we are allowed to bind here. */
  for (sk2 = sk->prot->sock_array[snum & (SOCK_ARRAY_SIZE -1)]; sk2 != NULL; sk2 = sk2->next)
    {
      if (sk2->num != snum)
	continue;
      if (sk2->saddr != sk->saddr)
	continue;
      /* Same port and same address: permitted only when BOTH sockets
       * have SO_REUSEADDR set. */
      if (!sk->reuse)
	return (-EADDRINUSE);
      if (!sk2->reuse)
	return (-EADDRINUSE);
    }

  /* Move the sock into the hash slot for its new port number. */
  remove_sock (sk);
  put_sock(snum, sk);
  sk->dummy_th.source = net16(sk->num);
  sk->daddr = 0;
  sk->dummy_th.dest = 0;
  return (0);
}
/* * perform a connection. we can only connect to unix sockets (i can't for * the life of me find an application where that wouldn't be the case!) */ static int unix_proto_connect(struct socket *sock, struct sockaddr *uservaddr, int sockaddr_len) { int i; struct unix_proto_data *serv_upd; struct sockaddr_un sockun; PRINTK("unix_proto_connect: socket 0x%x, servlen=%d\n", sock, sockaddr_len); if (sockaddr_len <= UN_PATH_OFFSET || sockaddr_len >= sizeof(struct sockaddr_un)) { PRINTK("unix_proto_connect: bad length %d\n", sockaddr_len); return -EINVAL; } verify_area(uservaddr, sockaddr_len); memcpy_fromfs(&sockun, uservaddr, sockaddr_len); if (sockun.sun_family != AF_UNIX) { PRINTK("unix_proto_connect: family is %d, not AF_UNIX (%d)\n", sockun.sun_family, AF_UNIX); return -EINVAL; } if (!(serv_upd = unix_data_lookup(&sockun, sockaddr_len))) { PRINTK("unix_proto_connect: can't locate peer\n"); return -EINVAL; } if ((i = sock_awaitconn(sock, serv_upd->socket)) < 0) { PRINTK("unix_proto_connect: can't await connection\n"); return i; } unix_data_ref(UN_DATA(sock->conn)); UN_DATA(sock)->peerupd = UN_DATA(sock->conn); /* ref server */ return 0; }
/*
 * ioctl handler for the SoundScape coprocessor.  Supports resetting
 * the coprocessor and loading a boot block supplied by user space.
 */
static int sscape_coproc_ioctl (void *dev_info, unsigned int cmd, caddr_t arg, int local)
{
  copr_buffer    *buf;
  int             err;

  switch (cmd)
    {
    case SNDCTL_COPR_RESET:
      sscape_coproc_reset (dev_info);
      return 0;

    case SNDCTL_COPR_LOAD:
      /* copr_buffer is large, so stage it in vmalloc'd memory rather
       * than on the kernel stack. */
      buf = (copr_buffer *) vmalloc (sizeof (copr_buffer));
      if (buf == NULL)
	return -(ENOSPC);
      memcpy_fromfs ((char *) buf, &((char *) arg)[0], sizeof (*buf));
      err = download_boot_block (dev_info, buf);
      vfree (buf);
      return err;

    default:
      return -(EINVAL);
    }
}
int fcntl_getlk(unsigned int fd, struct flock *l) { int error; struct flock flock; struct file *filp; struct file_lock *fl,file_lock; if (fd >= NR_OPEN || !(filp = current->files->fd[fd])) return -EBADF; error = verify_area(VERIFY_WRITE,l, sizeof(*l)); if (error) return error; memcpy_fromfs(&flock, l, sizeof(flock)); if (flock.l_type == F_UNLCK) return -EINVAL; if (!copy_flock(filp, &file_lock, &flock, fd)) return -EINVAL; for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) { if (conflict(&file_lock, fl)) { flock.l_pid = fl->fl_owner->pid; flock.l_start = fl->fl_start; flock.l_len = fl->fl_end == OFFSET_MAX ? 0 : fl->fl_end - fl->fl_start + 1; flock.l_whence = fl->fl_whence; flock.l_type = fl->fl_type; memcpy_tofs(l, &flock, sizeof(flock)); return 0; } } flock.l_type = F_UNLCK; /* no conflict found */ memcpy_tofs(l, &flock, sizeof(flock)); return 0; }
asmlinkage int sys_setrlimit(unsigned int resource, struct rlimit *rlim) { struct rlimit new_rlim, *old_rlim; int err; if (resource >= RLIM_NLIMITS) return -EINVAL; err = verify_area(VERIFY_READ, rlim, sizeof(*rlim)); if (err) return err; memcpy_fromfs(&new_rlim, rlim, sizeof(*rlim)); if (new_rlim.rlim_cur < 0 || new_rlim.rlim_max < 0) return -EINVAL; old_rlim = current->rlim + resource; if (((new_rlim.rlim_cur > old_rlim->rlim_max) || (new_rlim.rlim_max > old_rlim->rlim_max)) && !suser()) return -EPERM; if (resource == RLIMIT_NOFILE) { if (new_rlim.rlim_cur > NR_OPEN || new_rlim.rlim_max > NR_OPEN) return -EPERM; } *old_rlim = new_rlim; return 0; }
/*
 * write() handler: transmit a sequence of pulse/space durations
 * (lirc_t values) over the serial-port IR transmitter.
 */
static int lirc_write(struct inode *node, struct file *file, const char *buf, int n)
#endif
{
#ifdef LIRC_SERIAL_TRANSMITTER
	int retval,i,count;
	unsigned long flags;

	/* The buffer must hold a whole number of lirc_t samples. */
	if(n%sizeof(lirc_t)) return(-EINVAL);
	retval=verify_area(VERIFY_READ,buf,n);
	if(retval) return(retval);
	count=n/sizeof(lirc_t);
	/* Must fit in the write buffer, and the sample count must be
	 * odd so the sequence both starts and ends with a pulse. */
	if(count>WBUF_LEN || count%2==0) return(-EINVAL);
#       ifdef KERNEL_2_1
	copy_from_user(wbuf,buf,n);
#       else
	memcpy_fromfs(wbuf,buf,n);
#       endif
	/* Timing-critical: transmit with interrupts disabled. */
	save_flags(flags);cli();
#       ifdef LIRC_SERIAL_IRDEO
	/* DTR, RTS down */
	on();
#       endif
	/* Even indices are pulses, odd indices are spaces. */
	for(i=0;i<count;i++)
	{
		if(i%2) send_space(wbuf[i]);
		else send_pulse(wbuf[i]);
	}
	off();
	restore_flags(flags);
	return(n);
#else
	return(-EBADF);
#endif
}
/*
 * EQL_EMANCIPATE ioctl: detach a slave device (named in the user's
 * slaving_request_t) from this EQL master.  Clears the device's
 * IFF_SLAVE flag and removes it from the master's slave queue.
 * Returns 0 on success, -EINVAL if the name does not resolve to a
 * current slave, or a verify_area() error.
 */
static int eql_emancipate(struct device *dev, slaving_request_t *srqp)
{
	struct device *master_dev;
	struct device *slave_dev;
	slaving_request_t srq;
	int err;

	err = verify_area(VERIFY_READ, (void *)srqp, sizeof (slaving_request_t));
	if (err)
		return err;

	memcpy_fromfs (&srq, srqp, sizeof (slaving_request_t));
#ifdef EQL_DEBUG
	if (eql_debug >= 20)
		printk ("%s: emancipate `%s`\n", dev->name, srq.slave_name);
#endif
	master_dev = dev;		/* for "clarity" */
	slave_dev  = dev_get (srq.slave_name);

	/* BUG FIX: dev_get() returns NULL for an unknown name — check
	 * before dereferencing, as eql_enslave() already does. */
	if (slave_dev != 0 && eql_is_slave (slave_dev))	/* really is a slave */
	{
		equalizer_t *eql = (equalizer_t *) master_dev->priv;

		slave_dev->flags = slave_dev->flags & ~IFF_SLAVE;
		eql_remove_slave_dev (eql->queue, slave_dev);
		return 0;
	}
	return -EINVAL;
}
/* * I don't know what the parameters are: the first one * seems to be a timeval pointer, and I suspect the second * one is the time remaining.. Ho humm.. No documentation. */ asmlinkage int osf_usleep_thread(struct timeval * sleep, struct timeval * remain) { struct timeval tmp; unsigned long ticks; int retval; retval = verify_area(VERIFY_READ, sleep, sizeof(*sleep)); if (retval) return retval; if (remain && (retval = verify_area(VERIFY_WRITE, remain, sizeof(*remain)))) return retval; memcpy_fromfs(&tmp, sleep, sizeof(*sleep)); ticks = tmp.tv_usec; ticks = (ticks + (1000000 / HZ) - 1) / (1000000 / HZ); ticks += tmp.tv_sec * HZ; current->timeout = ticks + jiffies; current->state = TASK_INTERRUPTIBLE; schedule(); if (!remain) return 0; ticks = jiffies; if (ticks < current->timeout) ticks = current->timeout - ticks; else ticks = 0; current->timeout = 0; tmp.tv_sec = ticks / HZ; tmp.tv_usec = ticks % HZ; memcpy_tofs(remain, &tmp, sizeof(*remain)); return 0; }
/*
 * EQL_SETSLAVECFG ioctl: update the scheduling priority of one slave
 * of this EQL master.  Copies a slave_config_t from user space, looks
 * the slave up by name, and stores the priority in bps and Bps forms.
 * Returns 0 on success, -EINVAL if the name is not a current slave,
 * or a verify_area() error.
 */
static int eql_s_slave_cfg(struct device *dev, slave_config_t *scp)
{
	slave_t *slave;
	equalizer_t *eql;
	struct device *slave_dev;
	slave_config_t sc;
	int err;

	err = verify_area(VERIFY_READ, (void *)scp, sizeof (slave_config_t));
	if (err)
		return err;

	/* BUG FIX: copy the request in BEFORE using it — the old code
	 * printed sc.slave_name while sc was still uninitialized. */
	memcpy_fromfs (&sc, scp, sizeof (slave_config_t));

#ifdef EQL_DEBUG
	if (eql_debug >= 20)
		printk ("%s: set config for slave `%s'\n", dev->name, sc.slave_name);
#endif

	eql = (equalizer_t *) dev->priv;
	slave_dev = dev_get (sc.slave_name);

	/* BUG FIX: dev_get() returns NULL for an unknown device name. */
	if (slave_dev != 0 && eql_is_slave (slave_dev))
	{
		slave = eql_find_slave_dev (eql->queue, slave_dev);
		if (slave != 0)
		{
			slave->priority = sc.priority;
			slave->priority_bps = sc.priority;
			slave->priority_Bps = sc.priority / 8;
			return 0;
		}
	}
	return -EINVAL;
}
/*
 * Copy mount options from user space into a freshly allocated kernel
 * page.  On success *where receives the page address (the caller owns
 * and must free it); a NULL 'data' pointer is not an error and leaves
 * *where at 0.  Returns 0, -EFAULT for an unreadable pointer, or
 * -ENOMEM if no page could be allocated.
 */
static int copy_mount_options (const void * data, unsigned long *where)
{
	int i;
	unsigned long page;
	struct vm_area_struct * vma;

	*where = 0;
	if (!data)
		return 0;

	/* The pointer must fall inside a readable VMA of the caller. */
	vma = find_vma(current, (unsigned long) data);
	if (!vma || (unsigned long) data < vma->vm_start)
		return -EFAULT;
	if (!(vma->vm_flags & VM_READ))
		return -EFAULT;

	/* Copy at most one page minus one byte (leaving room for a NUL
	 * terminator in the page), and never read past the VMA's end. */
	i = vma->vm_end - (unsigned long) data;
	if (PAGE_SIZE <= (unsigned long) i)
		i = PAGE_SIZE-1;
	if (!(page = __get_free_page(GFP_KERNEL))) {
		return -ENOMEM;
	}
	memcpy_fromfs((void *) page,data,i);
	*where = page;
	return 0;
}
/*
 * Write handler for a /proc/<pid>/mem-style file: copies 'count'
 * bytes from the user buffer to the address taken from the file
 * position.
 *
 * NOTE(review): unlike the page-table-walking mem_write variant
 * elsewhere in this file, this version performs no address
 * translation and no access checks — memcpy_fromfs() writes straight
 * to 'addr' in the current context, and 'tsk' is looked up but never
 * used for the copy.  Verify which variant is meant to be compiled.
 */
static int mem_write(struct inode * inode, struct file * file,char * buf, int count)
{
	struct task_struct * tsk;
	unsigned long addr;
	char *tmp;
	int i;

	if (count < 0)
		return -EINVAL;
	/* Target address is carried in the file offset. */
	addr = file->f_pos;
	/* The pid is packed into the upper bits of the inode number. */
	tsk = get_task(inode->i_ino >> 16);
	if (!tsk)
		return -ESRCH;
	tmp = buf;
	while (count > 0) {
		/* Stop early if a signal is pending. */
		if (current->signal & ~current->blocked)
			break;
		i = count;
		memcpy_fromfs((void*)addr, tmp, i);
		addr += i;
		tmp += i;
		count -= i;
	}
	file->f_pos = addr;
	/* Report a partial write if anything was copied. */
	if (tmp != buf)
		return tmp-buf;
	if (current->signal & ~current->blocked)
		return -ERESTARTSYS;
	return 0;
}
/* Fetch a single byte from user space (through the fs segment). */
unsigned char get_user_char(void *dv)
{
	unsigned char val;

	memcpy_fromfs(&val, dv, 1);
	return val;
}
/* Fetch a 16-bit value from user space (through the fs segment). */
unsigned short int get_user(void *dv)
{
	unsigned short int val;

	memcpy_fromfs(&val, dv, 2);
	return val;
}
static int set_termios(struct tty_struct * tty, struct termios * termios, int channel) { struct termios tmp_termios; memcpy_fromfs(&tmp_termios, termios, sizeof (struct termios)); return set_termios_2(tty, &tmp_termios); }
/* Fetch a 32-bit value from user space (through the fs segment).
 * Exactly 4 bytes are copied regardless of sizeof(long). */
unsigned long int get_user_long(void *dv)
{
	unsigned long val;

	memcpy_fromfs(&val, dv, 4);
	return val;
}
/*
 * Verified copy from user space: checks read access on the source
 * range before copying.  Returns 0 on success or the verify_area()
 * error code.
 */
extern inline int copy_from_user(void *to, const void *from, unsigned long n)
{
	int err;

	err = verify_area(VERIFY_READ, from, n);
	if (err)
		return err;
	memcpy_fromfs(to, from, n);
	return 0;
}
/* set inwordLut contents. Invoked by ioctl(). */
int sel_loadlut(const unsigned long arg)
{
	int err;

	/* The argument points at a 4-byte header followed by the
	 * 32-byte (8 x u32) in-word lookup table. */
	err = verify_area(VERIFY_READ, (char *) arg, 36);
	if (err)
		return err;
	memcpy_fromfs(inwordLut, (u32 *)(arg+4), 32);
	return 0;
}
/*
 * tty write() for the LEON on-chip UART: stage bytes into the
 * driver's circular transmit buffer, then drain that buffer to the
 * UART by polling the transmit-holding-register flag.
 */
static int rs_write(struct tty_struct * tty, int from_user, const unsigned char *buf, int count)
{
	int c, total = 0;
	struct LEON_serial *info = (struct LEON_serial *)tty->driver_data;
	unsigned long flags;

	if (serial_paranoia_check(info, tty->device, "rs_write"))
		return 0;

	if (!tty || !info->xmit_buf)
		return 0;

	save_flags(flags);
	while (1) {
		cli();
		/* Largest contiguous chunk: bounded by the bytes left,
		 * the free space in the ring, and the wrap point. */
		c = MIN(count, MIN(SERIAL_XMIT_SIZE - info->xmit_cnt - 1, SERIAL_XMIT_SIZE - info->xmit_head));
		if (c <= 0)
			break;

		if (from_user) {
			down(&tmp_buf_sem);
			memcpy_fromfs(tmp_buf, buf, c);
			/* memcpy_fromfs may have slept (page fault), so
			 * re-clamp c against the ring's current state. */
			c = MIN(c, MIN(SERIAL_XMIT_SIZE - info->xmit_cnt - 1, SERIAL_XMIT_SIZE - info->xmit_head));
			memcpy(info->xmit_buf + info->xmit_head, tmp_buf, c);
			up(&tmp_buf_sem);
		} else
			memcpy(info->xmit_buf + info->xmit_head, buf, c);
		info->xmit_head = (info->xmit_head + c) & (SERIAL_XMIT_SIZE-1);
		info->xmit_cnt += c;
		restore_flags(flags);
		buf += c;
		count -= c;
		total += c;
	}
#if 0
	cli();
	if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
		/* Enable transmitter */
		leon->uartctrl1 |= UCTRL_TE | UCTRL_TI;
	}
#endif
#if 1
	/* Polled transmit: busy-wait the whole buffer out.
	 * NOTE(review): this loop runs with interrupts disabled (cli)
	 * until the UART drains everything — confirm this is intended
	 * rather than the interrupt-driven #if 0 path above. */
	cli();
	while (info->xmit_cnt > 0) {
		if (leon->uartstatus1 & USTAT_TH) {
			leon->uartdata1 = info->xmit_buf[info->xmit_tail++];
			info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
			info->xmit_cnt--;
		}
	}
#endif
	restore_flags(flags);
	return total;
}
/*
 * Write handler for /proc/<pid>/mem: copy bytes from the user buffer
 * into the target task's address space at the file offset, resolving
 * each page through the target's own page tables (pgd -> pmd -> pte).
 * Stops at the first unmapped, bad, non-present or read-only page and
 * returns the partial count.
 */
static int mem_write(struct inode * inode, struct file * file,char * buf, int count)
{
	pgd_t *page_dir;
	pmd_t *page_middle;
	pte_t pte;
	char * page;
	struct task_struct * tsk;
	unsigned long addr;
	char *tmp;
	int i;

	if (count < 0)
		return -EINVAL;
	/* Target address is carried in the file offset. */
	addr = file->f_pos;
	/* The pid is packed into the upper bits of the inode number. */
	tsk = get_task(inode->i_ino >> 16);
	if (!tsk)
		return -ESRCH;
	tmp = buf;
	while (count > 0) {
		/* Abort on a pending signal; report the partial count. */
		if (current->signal & ~current->blocked)
			break;
		/* Walk the target's page tables for this address. */
		page_dir = pgd_offset(tsk,addr);
		if (pgd_none(*page_dir))
			break;
		if (pgd_bad(*page_dir)) {
			printk("Bad page dir entry %08lx\n", pgd_val(*page_dir));
			pgd_clear(page_dir);
			break;
		}
		page_middle = pmd_offset(page_dir,addr);
		if (pmd_none(*page_middle))
			break;
		if (pmd_bad(*page_middle)) {
			printk("Bad page middle entry %08lx\n", pmd_val(*page_middle));
			pmd_clear(page_middle);
			break;
		}
		pte = *pte_offset(page_middle,addr);
		/* Only write through present, writable mappings. */
		if (!pte_present(pte))
			break;
		if (!pte_write(pte))
			break;
		/* Copy at most up to the end of the current page. */
		page = (char *) pte_page(pte) + (addr & ~PAGE_MASK);
		i = PAGE_SIZE-(addr & ~PAGE_MASK);
		if (i > count)
			i = count;
		memcpy_fromfs(page, tmp, i);
		addr += i;
		tmp += i;
		count -= i;
	}
	file->f_pos = addr;
	if (tmp != buf)
		return tmp-buf;
	if (current->signal & ~current->blocked)
		return -ERESTARTSYS;
	return 0;
}
/*
 * write() for regular files on a minix filesystem: copy user data
 * into the covering disk blocks, reading a block in first when only
 * part of it is overwritten, and keep the VM page cache coherent.
 * Returns the number of bytes written, or a negative errno if
 * nothing could be written.
 */
static int minix_file_write(struct inode * inode, struct file * filp, const char * buf, int count)
{
	off_t pos;
	int written,c;
	struct buffer_head * bh;
	char * p;

	if (!inode) {
		printk("minix_file_write: inode = NULL\n");
		return -EINVAL;
	}
	if (!S_ISREG(inode->i_mode)) {
		printk("minix_file_write: mode = %07o\n",inode->i_mode);
		return -EINVAL;
	}
	/* O_APPEND writes always start at end-of-file. */
	if (filp->f_flags & O_APPEND)
		pos = inode->i_size;
	else
		pos = filp->f_pos;
	written = 0;
	while (written < count) {
		/* Get (allocating if necessary) the block covering pos. */
		bh = minix_getblk(inode,pos/BLOCK_SIZE,1);
		if (!bh) {
			if (!written)
				written = -ENOSPC;
			break;
		}
		/* Bytes that fit in this block from pos to block end,
		 * capped by what remains to be written. */
		c = BLOCK_SIZE - (pos % BLOCK_SIZE);
		if (c > count-written)
			c = count-written;
		/* Partial-block write: read the block first so the
		 * untouched bytes survive. */
		if (c != BLOCK_SIZE && !buffer_uptodate(bh)) {
			ll_rw_block(READ, 1, &bh);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh)) {
				brelse(bh);
				if (!written)
					written = -EIO;
				break;
			}
		}
		p = (pos % BLOCK_SIZE) + bh->b_data;
		memcpy_fromfs(p,buf,c);
		/* Mirror the new data into any cached pages. */
		update_vm_cache(inode, pos, p, c);
		mark_buffer_uptodate(bh, 1);
		mark_buffer_dirty(bh, 0);
		brelse(bh);
		pos += c;
		written += c;
		buf += c;
	}
	/* Extend the file size if we wrote past the old end. */
	if (pos > inode->i_size)
		inode->i_size = pos;
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	filp->f_pos = pos;
	inode->i_dirt = 1;
	return written;
}
/*
 * Encode an opaque byte string into an XDR buffer: a 32-bit length in
 * network order, the data itself, then zero padding to the next
 * 4-byte boundary.  Returns a pointer just past the encoded item.
 */
static inline int *xdr_encode_data(int *p, char *data, int len)
{
	int words = QUADLEN(len);

	/* Clear the final word first; the copy then lands on top of it,
	 * leaving any pad bytes after the data zeroed. */
	p[words] = 0;
	*p++ = htonl(len);
	memcpy_fromfs(p, data, len);
	return p + words;
}
/*
 * 'copy_string()' copies argument/envelope strings from user
 * memory to free pages in kernel mem. These are in a format ready
 * to be put directly into the top of new user memory.
 *
 * Modified by TYT, 11/24/91 to add the from_kmem argument, which specifies
 * whether the string and the string array are from user or kernel segments:
 *
 * from_kmem     argv *        argv **
 *    0          user space    user space
 *    1          kernel space  user space
 *    2          kernel space  kernel space
 *
 * We do this by playing games with the fs segment register.  Since it
 * is expensive to load a segment register, we try to avoid calling
 * set_fs() unless we absolutely have to.
 */
unsigned long copy_strings(int argc,char ** argv,unsigned long *page, unsigned long p, int from_kmem)
{
	char *tmp, *pag = NULL;
	int len, offset = 0;
	unsigned long old_fs, new_fs;

	if ((long)p <= 0)
		return p;	/* bullet-proofing */
	new_fs = get_ds();
	old_fs = get_fs();
	if (from_kmem==2)
		set_fs(new_fs);
	/* Strings are copied last-to-first, downwards from offset p,
	 * so they end up laid out for the top of the new user stack. */
	while (argc-- > 0) {
		/* The argv array itself may live in a different segment
		 * than the strings (from_kmem == 1). */
		if (from_kmem == 1)
			set_fs(new_fs);
		if (!(tmp = get_user(argv+argc)))
			panic("VFS: argc is wrong");
		if (from_kmem == 1)
			set_fs(old_fs);
		len = count(tmp, 1, p);
		if (len < 0 || len >= p) {	/* EFAULT or E2BIG */
			set_fs(old_fs);
			return len < 0 ? len : -E2BIG;
		}
		/* len includes the terminating NUL; tmp now points one
		 * past the end of the string. */
		tmp += ++len;
		while (len) {
			--p; --tmp; --len;
			/* Crossed into the previous page: look it up in
			 * page[], allocating it on first touch. */
			if (--offset < 0) {
				offset = p % PAGE_SIZE;
				if (from_kmem==2)
					set_fs(old_fs);
				if (!(pag = (char *) page[p/PAGE_SIZE]) &&
				    !(pag = (char *) page[p/PAGE_SIZE] =
				      (unsigned long *) get_free_page(GFP_USER)))
					return -EFAULT;
				if (from_kmem==2)
					set_fs(new_fs);
			}
			if (len == 0 || offset == 0)
				/* Single byte at a page/string boundary. */
				*(pag + offset) = get_user(tmp);
			else {
				/* Bulk-copy as much of the string as fits
				 * in the current page. */
				int bytes_to_copy = (len > offset) ? offset : len;

				tmp -= bytes_to_copy;
				p -= bytes_to_copy;
				offset -= bytes_to_copy;
				len -= bytes_to_copy;
				memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
			}
		}
	}
	if (from_kmem==2)
		set_fs(old_fs);
	return p;
}
/*
 * 'copy_string()' copies argument/envelope strings from user
 * memory to free pages in kernel mem. These are in a format ready
 * to be put directly into the top of new user memory.
 *
 * Modified by TYT, 11/24/91 to add the from_kmem argument, which specifies
 * whether the string and the string array are from user or kernel segments:
 *
 * from_kmem     argv *        argv **
 *    0          user space    user space
 *    1          kernel space  user space
 *    2          kernel space  kernel space
 *
 * We do this by playing games with the fs segment register.  Since it
 * is expensive to load a segment register, we try to avoid calling
 * set_fs() unless we absolutely have to.
 */
unsigned long copy_strings(int argc,char ** argv,unsigned long *page, unsigned long p, int from_kmem)
{
	char *tmp, *tmp1, *pag = NULL;
	int len, offset = 0;
	unsigned long old_fs, new_fs;

	if (!p)
		return 0;	/* bullet-proofing */
	new_fs = get_ds();
	old_fs = get_fs();
	if (from_kmem==2)
		set_fs(new_fs);
	/* Strings are copied last-to-first, downwards from offset p,
	 * so they end up laid out for the top of the new user stack. */
	while (argc-- > 0) {
		/* The argv array itself may live in a different segment
		 * than the strings (from_kmem == 1). */
		if (from_kmem == 1)
			set_fs(new_fs);
		if (!(tmp1 = tmp = get_user(argv+argc)))
			panic("VFS: argc is wrong");
		if (from_kmem == 1)
			set_fs(old_fs);
		/* Scan to the terminating NUL; len includes the NUL. */
		while (get_user(tmp++));
		len = tmp - tmp1;
		if (p < len) {	/* this shouldn't happen - 128kB */
			set_fs(old_fs);
			return 0;
		}
		while (len) {
			--p; --tmp; --len;
			/* Crossed into the previous page: look it up in
			 * page[], allocating it on first touch. */
			if (--offset < 0) {
				offset = p % PAGE_SIZE;
				if (from_kmem==2)
					set_fs(old_fs);
				if (!(pag = (char *) page[p/PAGE_SIZE]) &&
				    !(pag = (char *) page[p/PAGE_SIZE] =
				      (unsigned long *) get_free_page(GFP_USER)))
					return 0;
				if (from_kmem==2)
					set_fs(new_fs);
			}
			if (len == 0 || offset == 0)
				/* Single byte at a page/string boundary. */
				*(pag + offset) = get_user(tmp);
			else {
				/* Bulk-copy as much of the string as fits
				 * in the current page. */
				int bytes_to_copy = (len > offset) ? offset : len;

				tmp -= bytes_to_copy;
				p -= bytes_to_copy;
				offset -= bytes_to_copy;
				len -= bytes_to_copy;
				memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
			}
		}
	}
	if (from_kmem==2)
		set_fs(old_fs);
	return p;
}
int sys_mmap(struct mmap_arg_struct *arg) { int error; struct mmap_arg_struct a; error = verify_area(VERIFY_READ, arg, sizeof(*arg)); if (error) return error; memcpy_fromfs(&a, arg, sizeof(*arg)); return do_mmap(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); }
static int set_window_size(struct tty_struct * tty, struct winsize * ws) { struct winsize tmp_ws; memcpy_fromfs(&tmp_ws, ws, sizeof (struct winsize)); if (memcmp(&tmp_ws, &tty->winsize, sizeof (struct winsize)) && tty->pgrp > 0) kill_pg(tty->pgrp, SIGWINCH, 1); tty->winsize = tmp_ws; return 0; }
/*
 * Copy len bytes from user space after checking read access on the
 * source range.  Returns 0 on success or the verify_area() error.
 */
int verified_memcpy_fromfs(void *daddr, void *saddr, size_t len)
{
	int rc;

	rc = verify_area(VERIFY_READ, saddr, len);
	if (rc == 0)
		memcpy_fromfs(daddr, saddr, len);
	return rc;
}
/*
 * EQL_ENSLAVE ioctl: attach the device named in the user's
 * slaving_request_t as a slave of this EQL master, with the given
 * scheduling priority (in bits per second).  The device must not
 * already be an EQL master or a slave.  Returns 0 on success,
 * -EINVAL/-ENOMEM on failure, or a verify_area() error.
 */
static int eql_enslave(struct device *dev, slaving_request_t *srqp)
{
	struct device *master_dev;
	struct device *slave_dev;
	slaving_request_t srq;
	int err;

	err = verify_area(VERIFY_READ, (void *)srqp, sizeof (slaving_request_t));
	if (err)
	{
#ifdef EQL_DEBUG
		if (eql_debug >= 20)
			printk ("EQL enslave: error detected by verify_area\n");
#endif
		return err;
	}
	memcpy_fromfs (&srq, srqp, sizeof (slaving_request_t));

#ifdef EQL_DEBUG
	if (eql_debug >= 20)
		printk ("%s: enslave '%s' %ld bps\n", dev->name, srq.slave_name, srq.priority);
#endif

	master_dev = dev;		/* for "clarity" */
	slave_dev = dev_get (srq.slave_name);

	if (master_dev != 0 && slave_dev != 0)
	{
		if (! eql_is_master (slave_dev) &&	/* slave is not a master */
		    ! eql_is_slave (slave_dev))	/* slave is not already a slave */
		{
			slave_t *s = eql_new_slave ();
			equalizer_t *eql = (equalizer_t *) master_dev->priv;

			/* BUG FIX: eql_new_slave() allocates with kmalloc
			 * and can return NULL; the old code dereferenced
			 * it unconditionally. */
			if (s == 0)
				return -ENOMEM;

			s->dev = slave_dev;
			s->priority = srq.priority;
			s->priority_bps = srq.priority;
			s->priority_Bps = srq.priority / 8;
			slave_dev->flags |= IFF_SLAVE;
			eql_insert_slave (eql->queue, s);
			return 0;
		}
#ifdef EQL_DEBUG
		if (eql_debug >= 20)
			printk ("EQL enslave: slave is master or slave is already slave\n");
#endif
		return -EINVAL;
	}
#ifdef EQL_DEBUG
	if (eql_debug >= 20)
		printk ("EQL enslave: master or slave are NULL");
#endif
	return -EINVAL;
}