/*
 * getname() interposition for scribed sockets.
 *
 * When the task is not being record/replayed, fall through to the real
 * socket operation.  Otherwise the return value, the address length and
 * the address bytes go through scribe_result()/scribe_value()/
 * scribe_buffer(), which presumably save them during recording and
 * re-inject them during replay (semantics inferred from names — the
 * helpers are defined elsewhere).
 *
 * Any scribe logging/injection failure is fatal for the whole session:
 * the context is killed with the error code.
 */
static int scribe_getname(struct socket *sock, struct sockaddr *addr,
			  int *sockaddr_len, int peer)
{
	struct scribe_ps *scribe = current->scribe;
	int ret, err;

	if (!is_scribed(scribe))
		return sock->real_ops->getname(sock, addr, sockaddr_len, peer);

	err = scribe_result(
		ret, sock->real_ops->getname(sock, addr, sockaddr_len, peer));
	if (err)
		goto out;
	/* On failure of getname() itself there is no address to log. */
	if (ret < 0)
		goto out;

	/* Length first, then exactly that many address bytes. */
	err = scribe_value(sockaddr_len);
	if (err)
		goto out;
	err = scribe_buffer(addr, *sockaddr_len);
out:
	if (err) {
		scribe_kill(scribe->ctx, err);
		return err;
	}
	return ret;
}
void scribe_pre_fput(struct file *file, struct scribe_fput_context *fput_ctx) { bool sync_fput = false; struct scribe_ps *scribe = current->scribe; struct scribe_lock_region *lock_region; struct scribe_res_user *user; fput_ctx->lock_region = NULL; if (!is_scribed(scribe)) return; user = &scribe->resources; lock_region = scribe_find_lock_region(user, file); if (file->f_op->scribe_sync_fput) sync_fput = file->f_op->scribe_sync_fput(file); /* * We don't need to sync fput, so we can unlock before fput(). */ if (!lock_region) return; if (lock_region->flags & SCRIBE_IMPLICIT_UNLOCK) __scribe_unlock_region(scribe, lock_region, false); }
/*
 * write() interposition.
 *
 * Non-scribed tasks, kernel-internal copies and tasks not scribing data
 * go straight to do_write() (with in_read_write bracketing for the
 * latter two).  During replay, the byte count is forced to the recorded
 * syscall return value (scribe->orig_ret) and the write is made
 * blocking so the same amount is written deterministically.
 */
static ssize_t scribe_do_write(struct file *file, const char __user *buf,
			       ssize_t count, loff_t *ppos)
{
	struct scribe_ps *scribe = current->scribe;
	int force_block = 0;
	ssize_t ret;

	if (!is_scribed(scribe))
		return do_write(file, buf, count, ppos, force_block);

	if (is_kernel_copy())
		goto out;
	if (!should_scribe_data(scribe))
		goto out;

	scribe_need_syscall_ret(scribe);

	if (is_replaying(scribe)) {
		/* Replay exactly the recorded result, including errors. */
		count = scribe->orig_ret;
		if (count <= 0)
			return count;
		/* Block so the full recorded count gets written. */
		force_block = 1;
	}
out:
	/* Mark that we are inside a read/write for nested scribe hooks. */
	scribe->in_read_write = true;
	ret = do_write(file, buf, count, ppos, force_block);
	scribe->in_read_write = false;
	return ret;
}
/*
 * mmap() interposition for scribed sockets: while record/replaying,
 * socket mmap is refused via sock_no_mmap(); otherwise the real
 * operation runs untouched.
 */
static int scribe_mmap(struct file *file, struct socket *sock,
		       struct vm_area_struct *vma)
{
	struct scribe_ps *ps = current->scribe;

	if (is_scribed(ps))
		return sock_no_mmap(file, sock, vma);

	return sock->real_ops->mmap(file, sock, vma);
}
/*
 * Hook run before a user-space access on a scribed task.  Opens the
 * uaccess window for real (non-kernel-internal, non-empty) copies and
 * records the size about to be transferred.
 */
void scribe_pre_uaccess(const void *data, const void __user *user_ptr,
			size_t size, int flags)
{
	struct scribe_ps *ps = current->scribe;

	if (!is_scribed(ps))
		return;
	if (!should_scribe_data(ps))
		return;

	if (size && !is_kernel_copy())
		__scribe_allow_uaccess(ps);

	ps->to_be_copied_size = size;
}
/*
 * Tell whether @sock behaves deterministically for the current scribed
 * task.  Non-scribed tasks always get false.
 */
static bool scribe_is_deterministic(struct socket *sock)
{
	struct scribe_ps *ps = current->scribe;

	if (!is_scribed(ps))
		return false;

	/*
	 * The determinism flag is cached on the sk: once the peer has
	 * disconnected there is no other way to tell, after the fact,
	 * whether the socket was deterministic or not.
	 */
	return sock->sk->sk_scribe_deterministic;
}
/*
 * shutdown() interposition.  Deterministic sockets and non-scribed
 * tasks use the real op directly; otherwise the result is routed
 * through scribe_result() for record/replay.
 */
static int scribe_shutdown(struct socket *sock, int flags)
{
	struct scribe_ps *ps = current->scribe;
	int ret, err;

	/*
	 * scribe_is_deterministic() is false for non-scribed tasks, so
	 * the two tests can be checked in either order.
	 */
	if (!is_scribed(ps) || scribe_is_deterministic(sock))
		return sock->real_ops->shutdown(sock, flags);

	err = scribe_result(ret, sock->real_ops->shutdown(sock, flags));
	return err ?: ret;
}
/*
 * recvmsg() interposition.
 *
 * Deterministic sockets (and non-scribed tasks) run the real op with
 * deterministic data flags; during replay the receive is forced to
 * block until the recorded amount arrives.  Non-deterministic sockets
 * go through scribe_result_cond(), and the source address
 * (msg_name/msg_namelen) is additionally saved/restored.
 *
 * Fix: the deterministic path used the comma operator —
 * "ret = ...->recvmsg(...), scribe_data_pop_flags();" — where two
 * statements were clearly intended; this is now written as two
 * statements (same behavior, no more operator abuse).
 */
static int scribe_recvmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *m, size_t total_len, int flags)
{
	struct scribe_ps *scribe = current->scribe;
	int ret, err;

	if (scribe_is_deterministic(sock) || !is_scribed(scribe)) {
		if (is_replaying(scribe)) {
			/* Block until the recorded byte count arrives. */
			flags &= ~MSG_DONTWAIT;
			flags |= MSG_WAITALL;
			if (!scribe_is_in_read_write(scribe))
				total_len = scribe->orig_ret;
			if ((ssize_t)total_len <= 0)
				return total_len;
		}
		scribe_data_det();
		ret = sock->real_ops->recvmsg(iocb, sock, m, total_len, flags);
		scribe_data_pop_flags();
		return ret;
	}

	scribe_data_non_det_need_info();
	err = scribe_result_cond(
		ret, sock->real_ops->recvmsg(iocb, sock, m, total_len, flags),
		!scribe_is_in_read_write(scribe) || ret > 0);
	if (err)
		goto out;
	if (ret <= 0)
		goto out;

	/* During replay, re-inject the recorded payload into user space. */
	if (is_replaying(scribe))
		scribe_emul_copy_to_user(scribe, NULL, INT_MAX);

	/* Save/restore the peer address alongside the payload. */
	err = scribe_value(&m->msg_namelen);
	if (err)
		goto out;
	err = scribe_buffer(m->msg_name, m->msg_namelen);
out:
	scribe_data_pop_flags();
	return err ?: ret;
}
/*
 * XXX sys_accept() doesn't call this function
 */
int scribe_interpose_socket(struct socket *sock)
{
	struct scribe_ps *ps = current->scribe;

	if (!is_scribed(ps))
		return 0;

	/* TODO We should revert the ops to real_ops when the context dies */
	sock->real_ops = sock->ops;
	sock->ops = &scribe_ops;

	/*
	 * sock->sk->sk_scribe_ctx and sk_scribe_deterministic are already
	 * initialized in sk_alloc.
	 */
	return 0;
}
/*
 * Called before fget() on a scribed task.
 *
 * If a one-shot lock request was armed via scribe->lock_next_file, hand
 * the lock flags (plus SCRIBE_IMPLICIT_UNLOCK) back through @lock_flags
 * and disarm the request.
 */
void scribe_pre_fget(struct files_struct *files, int *lock_flags)
{
	struct scribe_ps *scribe = current->scribe;

	*lock_flags = 0;

	if (!is_scribed(scribe))
		return;

	if (scribe->lock_next_file) {
		*lock_flags = scribe->lock_next_file | SCRIBE_IMPLICIT_UNLOCK;
		/* One-shot: consume the pending lock request. */
		scribe->lock_next_file = 0;

		/*
		 * We need to lock the files_struct while doing fcheck_files()
		 * to guard against races with fd_install()
		 */
		scribe_lock_files_read(files);
	}
}
/*
 * read() interposition.
 *
 * Fast path: non-scribed tasks, kernel copies, and tasks not scribing
 * data call do_read() directly (the latter two with in_read_write
 * bracketing).  During replay the length is forced to the recorded
 * return value and the read is made blocking.  For non-deterministic
 * files, recording tags the data non-det and performs the real read,
 * while replaying skips the device entirely and re-injects the
 * recorded bytes via scribe_emul_copy_to_user().
 */
static ssize_t scribe_do_read(struct file *file, char __user *buf,
			      ssize_t len, loff_t *ppos)
{
	struct scribe_ps *scribe = current->scribe;
	int force_block = 0;
	ssize_t ret;

	if (!is_scribed(scribe))
		return do_read(file, buf, len, ppos, force_block);

	if (is_kernel_copy())
		goto out;
	if (!should_scribe_data(scribe))
		goto out;

	scribe_need_syscall_ret(scribe);

	if (is_replaying(scribe)) {
		/* Replay exactly the recorded result, including errors. */
		len = scribe->orig_ret;
		if (len <= 0)
			return len;
		force_block = 1;
	}

	/* Deterministic files can just be re-read for real. */
	if (is_deterministic(file))
		goto out;

	scribe_data_non_det();
	if (is_recording(scribe))
		goto out;

	/* Replaying a non-det file: feed the log back to user space. */
	return scribe_emul_copy_to_user(scribe, buf, len);
out:
	scribe->in_read_write = true;
	ret = do_read(file, buf, len, ppos, force_block);
	scribe->in_read_write = false;
	return ret;
}
/*
 * Pre-allocate a data event of @pre_alloc_size bytes for the current
 * scribed task, stashing it in scribe->prepared_data_event.  A no-op
 * when the task is not scribed, when no action is needed for this
 * descriptor, or when event allocation fails.
 */
void scribe_prepare_data_event(size_t pre_alloc_size)
{
	struct scribe_ps *ps = current->scribe;
	struct data_desc desc;

	if (!is_scribed(ps))
		return;

	/* Describe a payload-less placeholder of the requested size. */
	desc.data = NULL;
	desc.user_ptr = NULL;
	desc.size = pre_alloc_size;
	desc.flags = ps->data_flags;
	desc.event.generic = NULL;
	post_init_data_desc(ps, &desc);

	if (!need_action(ps, &desc))
		return;
	if (get_data_event(ps, &desc))
		return;

	ps->prepared_data_event = desc.event;
}
/*
 * sendmsg() interposition.
 *
 * Deterministic sockets (and non-scribed tasks) run the real op; during
 * replay the send is made blocking and the length forced to the
 * recorded return value.  Non-deterministic sockets route the result
 * through scribe_result_cond(), and on replay the recorded user buffer
 * is consumed via scribe_emul_copy_from_user().
 */
static int scribe_sendmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *m, size_t total_len)
{
	struct scribe_ps *scribe = current->scribe;
	int ret, err;

	if (scribe_is_deterministic(sock) || !is_scribed(scribe)) {
		if (is_replaying(scribe)) {
			/* Block so the full recorded count gets sent. */
			m->msg_flags &= ~MSG_DONTWAIT;
			if (!scribe_is_in_read_write(scribe))
				total_len = scribe->orig_ret;
			if ((ssize_t)total_len <= 0)
				return total_len;
		}
		ret = sock->real_ops->sendmsg(iocb, sock, m, total_len);
		return ret;
	}

	scribe_data_need_info();
	err = scribe_result_cond(
		ret, sock->real_ops->sendmsg(iocb, sock, m, total_len),
		!scribe_is_in_read_write(scribe) || ret > 0);
	if (err)
		goto out;
	if (ret <= 0)
		goto out;

	/* Consume the recorded source bytes instead of touching userspace. */
	if (is_replaying(scribe))
		scribe_emul_copy_from_user(scribe, NULL, INT_MAX);
out:
	/* Balances scribe_data_need_info() above. */
	scribe_data_pop_flags();
	return err ?: ret;
}
/*
 * readv()/writev() interposition, mirroring scribe_do_read/do_write.
 *
 * During replay the total length is forced to the recorded return
 * value; for non-deterministic reads while replaying, the iovec is
 * materialized and the recorded bytes are re-injected segment by
 * segment via io_scribe_emul_copy_to_user.
 *
 * NOTE(review): the return values of rw_copy_check_uvector() are
 * ignored on both replay paths — presumably the vector was already
 * validated during recording, but a copy failure here would go
 * unnoticed; confirm against the callers.
 */
static ssize_t scribe_do_readv_writev(int type, struct file *file,
				      const struct iovec __user * uvector,
				      unsigned long nr_segs, loff_t *pos)
{
	struct scribe_ps *scribe = current->scribe;
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	int force_block = 0;
	ssize_t ret, len = 0;

	if (!is_scribed(scribe))
		return do_readv_writev(type, file, uvector, nr_segs, 0,
				       pos, force_block);

	if (is_kernel_copy())
		goto out;
	if (!should_scribe_data(scribe))
		goto out;

	scribe_need_syscall_ret(scribe);

	if (is_replaying(scribe)) {
		/* Replay exactly the recorded result, including errors. */
		len = scribe->orig_ret;
		if (len <= 0) {
			rw_copy_check_uvector(type, uvector, nr_segs,
					      ARRAY_SIZE(iovstack), iovstack,
					      &iov);
			ret = len;
			goto free;
		}
		force_block = 1;
	}

	if (type == READ) {
		if (is_deterministic(file))
			goto out;

		scribe_data_non_det();
		if (is_recording(scribe))
			goto out;

		/* Replaying a non-det read: feed the log back per-segment. */
		rw_copy_check_uvector(type, uvector, nr_segs,
				      ARRAY_SIZE(iovstack), iovstack, &iov);
		ret = __do_loop_readv_writev(file, iov, nr_segs, len, pos,
					     io_scribe_emul_copy_to_user);
		goto free;
	}

out:
	scribe->in_read_write = true;
	ret = do_readv_writev(type, file, uvector, nr_segs, len, pos,
			      force_block);
	scribe->in_read_write = false;
free:
	/* rw_copy_check_uvector() may have kmalloc'ed a larger vector. */
	if (iov != iovstack)
		kfree(iov);
	return ret;
}