/*!	Duplicates an FD from another team to this/the kernel team.
	\param fromTeam The team which owns the FD.
	\param fd The FD to duplicate.
	\param kernel If \c true, the new FD will be created in the kernel team,
			the current userland team otherwise.
	\return The newly created FD or an error code, if something went wrong.
*/
int
dup_foreign_fd(team_id fromTeam, int fd, bool kernel)
{
	// get the I/O context for the team in question
	InterruptsSpinLocker teamsLocker(gTeamSpinlock);
	struct team* team = team_get_team_struct_locked(fromTeam);
	if (team == NULL)
		return B_BAD_TEAM_ID;

	io_context* fromContext = team->io_context;
	vfs_get_io_context(fromContext);

	teamsLocker.Unlock();

	CObjectDeleter<io_context> _(fromContext, vfs_put_io_context);

	// get the file descriptor
	file_descriptor* descriptor = get_fd(fromContext, fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;
	CObjectDeleter<file_descriptor> descriptorPutter(descriptor, put_fd);

	// create a new FD in the target I/O context
	int result = new_fd(get_current_io_context(kernel), descriptor);
	if (result >= 0) {
		// the descriptor reference belongs to the slot, now
		descriptorPutter.Detach();
	}

	return result;
}
fssh_ssize_t
_kern_write(int fd, fssh_off_t pos, const void *buffer, fssh_size_t length)
{
	struct file_descriptor *descriptor;
	fssh_ssize_t bytesWritten;

	descriptor = get_fd(get_current_io_context(true), fd);
	if (descriptor == NULL)
		return FSSH_B_FILE_ERROR;
	if ((descriptor->open_mode & FSSH_O_RWMASK) == FSSH_O_RDONLY) {
		put_fd(descriptor);
		return FSSH_B_FILE_ERROR;
	}

	if (pos == -1)
		pos = descriptor->pos;

	if (descriptor->ops->fd_write) {
		bytesWritten = descriptor->ops->fd_write(descriptor, pos, buffer,
			&length);
		if (bytesWritten >= FSSH_B_OK) {
			if (length > FSSH_SSIZE_MAX)
				bytesWritten = FSSH_SSIZE_MAX;
			else
				bytesWritten = (fssh_ssize_t)length;

			descriptor->pos = pos + length;
		}
	} else
		bytesWritten = FSSH_B_BAD_VALUE;

	put_fd(descriptor);
	return bytesWritten;
}
static int
dup_fd(int fd, bool kernel)
{
	struct io_context* context = get_current_io_context(kernel);
	struct file_descriptor* descriptor;
	int status;

	TRACE(("dup_fd: fd = %d\n", fd));

	// Try to get the fd structure
	descriptor = get_fd(context, fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;

	// now put the fd in place
	status = new_fd(context, descriptor);
	if (status < 0)
		put_fd(descriptor);
	else {
		mutex_lock(&context->io_mutex);
		fd_set_close_on_exec(context, status, false);
		mutex_unlock(&context->io_mutex);
	}

	return status;
}
/*!	Duplicates an FD from another team to this/the kernel team.
	\param fromTeam The team which owns the FD.
	\param fd The FD to duplicate.
	\param kernel If \c true, the new FD will be created in the kernel team,
			the current userland team otherwise.
	\return The newly created FD or an error code, if something went wrong.
*/
int
dup_foreign_fd(team_id fromTeam, int fd, bool kernel)
{
	// get the I/O context for the team in question
	Team* team = Team::Get(fromTeam);
	if (team == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> teamReference(team, true);

	io_context* fromContext = team->io_context;

	// get the file descriptor
	file_descriptor* descriptor = get_fd(fromContext, fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;
	CObjectDeleter<file_descriptor> descriptorPutter(descriptor, put_fd);

	// create a new FD in the target I/O context
	int result = new_fd(get_current_io_context(kernel), descriptor);
	if (result >= 0) {
		// the descriptor reference belongs to the slot, now
		descriptorPutter.Detach();
	}

	return result;
}
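// Illustrative only -- a sketch of how a caller might use dup_foreign_fd()
// to mirror an FD from another team into the kernel I/O context. The helper
// name and the use of _kern_close() to release the duplicate are assumptions
// for the example, not part of the code above.
static status_t
with_foreign_fd(team_id targetTeam, int targetFD)
{
	int localFD = dup_foreign_fd(targetTeam, targetFD, true);
		// kernel == true: the duplicate lands in the kernel team
	if (localFD < 0)
		return localFD;

	// ... operate on localFD like on any locally opened FD ...

	return _kern_close(localFD);
}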
static int
create_socket_fd(net_socket* socket, bool kernel)
{
	// Get the socket's non-blocking flag, so we can set the respective
	// open mode flag.
	int32 nonBlock;
	socklen_t nonBlockLen = sizeof(int32);
	status_t error = sStackInterface->getsockopt(socket, SOL_SOCKET,
		SO_NONBLOCK, &nonBlock, &nonBlockLen);
	if (error != B_OK)
		return error;

	// allocate a file descriptor
	file_descriptor* descriptor = alloc_fd();
	if (descriptor == NULL)
		return B_NO_MEMORY;

	// init it
	descriptor->type = FDTYPE_SOCKET;
	descriptor->ops = &sSocketFDOps;
	descriptor->u.socket = socket;
	descriptor->open_mode = O_RDWR | (nonBlock ? O_NONBLOCK : 0);

	// publish it
	int fd = new_fd(get_current_io_context(kernel), descriptor);
	if (fd < 0)
		free(descriptor);

	return fd;
}
ssize_t
_kern_read_dir(int fd, struct dirent* buffer, size_t bufferSize,
	uint32 maxCount)
{
	struct file_descriptor* descriptor;
	ssize_t retval;

	TRACE(("sys_read_dir(fd = %d, buffer = %p, bufferSize = %ld, count = "
		"%lu)\n", fd, buffer, bufferSize, maxCount));

	struct io_context* ioContext = get_current_io_context(true);
	descriptor = get_fd(ioContext, fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;
	if ((descriptor->open_mode & O_DISCONNECTED) != 0) {
		// release the reference acquired by get_fd()
		put_fd(descriptor);
		return B_FILE_ERROR;
	}

	if (descriptor->ops->fd_read_dir) {
		uint32 count = maxCount;
		retval = descriptor->ops->fd_read_dir(ioContext, descriptor, buffer,
			bufferSize, &count);
		if (retval >= 0)
			retval = count;
	} else
		retval = B_UNSUPPORTED;

	put_fd(descriptor);
	return retval;
}
/*!	POSIX says this should be the same as:
		close(newfd);
		fcntl(oldfd, F_DUPFD, newfd);

	We do dup2() directly to be thread-safe.
*/
static int
dup2_fd(int oldfd, int newfd, bool kernel)
{
	struct file_descriptor* evicted = NULL;
	struct io_context* context;

	TRACE(("dup2_fd: ofd = %d, nfd = %d\n", oldfd, newfd));

	// quick check
	if (oldfd < 0 || newfd < 0)
		return B_FILE_ERROR;

	// Get current I/O context and lock it
	context = get_current_io_context(kernel);
	mutex_lock(&context->io_mutex);

	// Check if the fds are valid (mutex must be locked because
	// the table size could be changed)
	if ((uint32)oldfd >= context->table_size
		|| (uint32)newfd >= context->table_size
		|| context->fds[oldfd] == NULL
		|| (context->fds[oldfd]->open_mode & O_DISCONNECTED) != 0) {
		mutex_unlock(&context->io_mutex);
		return B_FILE_ERROR;
	}

	// Check for identity, note that it cannot be made above
	// because we always want to return an error on invalid
	// handles
	select_info* selectInfos = NULL;
	if (oldfd != newfd) {
		// Now do the work
		TFD(Dup2FD(context, oldfd, newfd));

		evicted = context->fds[newfd];
		selectInfos = context->select_infos[newfd];
		context->select_infos[newfd] = NULL;
		atomic_add(&context->fds[oldfd]->ref_count, 1);
		atomic_add(&context->fds[oldfd]->open_count, 1);
		context->fds[newfd] = context->fds[oldfd];

		if (evicted == NULL)
			context->num_used_fds++;
	}

	fd_set_close_on_exec(context, newfd, false);

	mutex_unlock(&context->io_mutex);

	// Say bye bye to the evicted fd
	if (evicted) {
		deselect_select_infos(evicted, selectInfos, true);
		close_fd(evicted);
		put_fd(evicted);
	}

	return newfd;
}
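// For contrast, the non-atomic POSIX emulation mentioned in the doc comment
// above (an illustrative userland sketch, not kernel code). Between the two
// calls another thread may grab newfd, which is exactly the race the locked
// dup2_fd() above avoids:
int
emulated_dup2(int oldfd, int newfd)
{
	close(newfd);
		// the slot is free here and can be taken by a concurrent open()
	return fcntl(oldfd, F_DUPFD, newfd);
		// duplicates to the lowest free FD >= newfd -- possibly not newfd
}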
bool
fd_is_valid(int fd, bool kernel)
{
	struct file_descriptor *descriptor
		= get_fd(get_current_io_context(kernel), fd);
	if (descriptor == NULL)
		return false;

	put_fd(descriptor);
	return true;
}
/*!	This function checks if the specified fd is valid in the current
	context. It can be used for a quick check; the fd is not locked
	so it could become invalid immediately after this check.
*/
bool
fd_is_valid(int fd, bool kernel)
{
	struct file_descriptor* descriptor
		= get_fd(get_current_io_context(kernel), fd);
	if (descriptor == NULL)
		return false;

	// a disconnected descriptor does not count as valid; either way we
	// have to return the reference acquired by get_fd()
	bool valid = (descriptor->open_mode & O_DISCONNECTED) == 0;
	put_fd(descriptor);
	return valid;
}
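// Because the check is advisory (see the doc comment above), a true result
// is only a hint -- a sketch of the intended pattern, with a hypothetical
// caller; the FD can still vanish between the check and the real operation:
static status_t
probe_then_use(int fd)
{
	if (!fd_is_valid(fd, false))
		return B_FILE_ERROR;	// definitely gone (or disconnected)

	// Another thread may close the FD right here, so the actual operation
	// has to handle failure again anyway:
	file_descriptor* descriptor = get_fd(get_current_io_context(false), fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;

	// ... use descriptor ...

	put_fd(descriptor);
	return B_OK;
}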
ssize_t
_user_read_dir(int fd, struct dirent* userBuffer, size_t bufferSize,
	uint32 maxCount)
{
	TRACE(("user_read_dir(fd = %d, userBuffer = %p, bufferSize = %ld, count = "
		"%lu)\n", fd, userBuffer, bufferSize, maxCount));

	if (maxCount == 0)
		return 0;

	if (userBuffer == NULL || !IS_USER_ADDRESS(userBuffer))
		return B_BAD_ADDRESS;

	// get I/O context and FD
	io_context* ioContext = get_current_io_context(false);

	FDGetter fdGetter;
	struct file_descriptor* descriptor = fdGetter.SetTo(ioContext, fd, false);
	if (descriptor == NULL || (descriptor->open_mode & O_DISCONNECTED) != 0)
		return B_FILE_ERROR;

	if (descriptor->ops->fd_read_dir == NULL)
		return B_UNSUPPORTED;

	// restrict buffer size and allocate a heap buffer
	if (bufferSize > kMaxReadDirBufferSize)
		bufferSize = kMaxReadDirBufferSize;

	struct dirent* buffer = (struct dirent*)malloc(bufferSize);
	if (buffer == NULL)
		return B_NO_MEMORY;

	MemoryDeleter bufferDeleter(buffer);

	// read the directory
	uint32 count = maxCount;
	status_t status = descriptor->ops->fd_read_dir(ioContext, descriptor,
		buffer, bufferSize, &count);
	if (status != B_OK)
		return status;

	// copy the buffer back -- determine the total buffer size first
	size_t sizeToCopy = 0;
	struct dirent* entry = buffer;
	for (uint32 i = 0; i < count; i++) {
		size_t length = entry->d_reclen;
		sizeToCopy += length;
		entry = (struct dirent*)((uint8*)entry + length);
	}

	if (user_memcpy(userBuffer, buffer, sizeToCopy) != B_OK)
		return B_BAD_ADDRESS;

	return count;
}
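// The records come back packed, each advanced by d_reclen -- the same walk
// the size computation above performs. A sketch of a hypothetical userland
// consumer of the returned buffer (buffer size and maxCount of 32 are
// arbitrary for the example):
static void
list_directory(int fd)
{
	char buffer[4096];
	ssize_t count = _kern_read_dir(fd, (struct dirent*)buffer,
		sizeof(buffer), 32);

	struct dirent* entry = (struct dirent*)buffer;
	for (ssize_t i = 0; i < count; i++) {
		printf("%s\n", entry->d_name);
		entry = (struct dirent*)((uint8*)entry + entry->d_reclen);
	}
}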
static status_t
get_socket_descriptor(int fd, bool kernel, file_descriptor*& descriptor)
{
	if (fd < 0)
		return EBADF;

	descriptor = get_fd(get_current_io_context(kernel), fd);
	if (descriptor == NULL)
		return EBADF;

	if (descriptor->type != FDTYPE_SOCKET) {
		// not a socket FD -- return the reference again
		put_fd(descriptor);
		return ENOTSOCK;
	}

	return B_OK;
}
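// Sketch of the intended calling pattern (with_socket is a hypothetical
// caller): on success the descriptor carries a reference that must be
// returned with put_fd() when done.
static status_t
with_socket(int fd, bool kernel)
{
	file_descriptor* descriptor;
	status_t error = get_socket_descriptor(fd, kernel, descriptor);
	if (error != B_OK)
		return error;	// EBADF or ENOTSOCK, as POSIX prescribes

	net_socket* socket = descriptor->u.socket;
	// ... operate on the socket ...

	put_fd(descriptor);
	return B_OK;
}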
off_t
_kern_seek(int fd, off_t pos, int seekType)
{
	struct file_descriptor* descriptor;

	descriptor = get_fd(get_current_io_context(true), fd);
	if (!descriptor)
		return B_FILE_ERROR;

	if (descriptor->ops->fd_seek)
		pos = descriptor->ops->fd_seek(descriptor, pos, seekType);
	else
		pos = ESPIPE;

	put_fd(descriptor);
	return pos;
}
status_t
deselect_fd(int32 fd, struct select_info* info, bool kernel)
{
	TRACE(("deselect_fd(fd = %ld, info = %p (%p), 0x%x)\n", fd, info,
		info->sync, info->selected_events));

	FDGetter fdGetter;
		// define before the context locker, so it will be destroyed after it

	io_context* context = get_current_io_context(kernel);
	MutexLocker locker(context->io_mutex);

	struct file_descriptor* descriptor = fdGetter.SetTo(context, fd, true);
	if (descriptor == NULL)
		return B_FILE_ERROR;

	// remove the info from the IO context
	select_info** infoLocation = &context->select_infos[fd];
	while (*infoLocation != NULL && *infoLocation != info)
		infoLocation = &(*infoLocation)->next;

	// If not found, someone else beat us to it.
	if (*infoLocation != info)
		return B_OK;

	*infoLocation = info->next;

	locker.Unlock();

	// deselect the selected events
	uint16 eventsToDeselect = info->selected_events & ~B_EVENT_INVALID;
	if (descriptor->ops->fd_deselect != NULL && eventsToDeselect != 0) {
		for (uint16 event = 1; event < 16; event++) {
			if ((eventsToDeselect & SELECT_FLAG(event)) != 0) {
				descriptor->ops->fd_deselect(descriptor, event,
					(selectsync*)info);
			}
		}
	}

	put_select_sync(info->sync);

	return B_OK;
}
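// SELECT_FLAG(), used by the deselect loop above (and by select_fd() below),
// maps event numbers 1..15 onto bits of the 16-bit selected_events mask.
// A sketch of the assumed definition, consistent with that use:
#define SELECT_FLAG(type)	(1L << (type - 1))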
static status_t
fd_ioctl(bool kernelFD, int fd, ulong op, void* buffer, size_t length)
{
	struct file_descriptor* descriptor;
	int status;

	descriptor = get_fd(get_current_io_context(kernelFD), fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;

	if (descriptor->ops->fd_ioctl)
		status = descriptor->ops->fd_ioctl(descriptor, op, buffer, length);
	else
		status = EOPNOTSUPP;

	put_fd(descriptor);
	return status;
}
fssh_ssize_t
_kern_writev(int fd, fssh_off_t pos, const fssh_iovec *vecs, fssh_size_t count)
{
	struct file_descriptor *descriptor;
	fssh_ssize_t bytesWritten = 0;
	fssh_status_t status;
	uint32_t i;

	descriptor = get_fd(get_current_io_context(true), fd);
	if (!descriptor)
		return FSSH_B_FILE_ERROR;
	if ((descriptor->open_mode & FSSH_O_RWMASK) == FSSH_O_RDONLY) {
		put_fd(descriptor);
		return FSSH_B_FILE_ERROR;
	}

	if (pos == -1)
		pos = descriptor->pos;

	if (descriptor->ops->fd_write) {
		for (i = 0; i < count; i++) {
			fssh_size_t length = vecs[i].iov_len;
			status = descriptor->ops->fd_write(descriptor, pos,
				vecs[i].iov_base, &length);
			if (status < FSSH_B_OK) {
				bytesWritten = status;
				break;
			}

			if ((uint32_t)bytesWritten + length > FSSH_SSIZE_MAX)
				bytesWritten = FSSH_SSIZE_MAX;
			else
				bytesWritten += (fssh_ssize_t)length;

			pos += vecs[i].iov_len;
		}
	} else
		bytesWritten = FSSH_B_BAD_VALUE;

	descriptor->pos = pos;
	put_fd(descriptor);
	return bytesWritten;
}
status_t
_user_rewind_dir(int fd)
{
	struct file_descriptor* descriptor;
	status_t status;

	TRACE(("user_rewind_dir(fd = %d)\n", fd));

	descriptor = get_fd(get_current_io_context(false), fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;
	if ((descriptor->open_mode & O_DISCONNECTED) != 0) {
		// release the reference acquired by get_fd()
		put_fd(descriptor);
		return B_FILE_ERROR;
	}

	if (descriptor->ops->fd_rewind_dir != NULL)
		status = descriptor->ops->fd_rewind_dir(descriptor);
	else
		status = B_UNSUPPORTED;

	put_fd(descriptor);
	return status;
}
static fssh_status_t
common_close(int fd, bool kernel)
{
	struct io_context *io = get_current_io_context(kernel);
	struct file_descriptor *descriptor = remove_fd(io, fd);

	if (descriptor == NULL)
		return FSSH_B_FILE_ERROR;

#ifdef TRACE_FD
	if (!kernel)
		TRACE(("_user_close(descriptor = %p)\n", descriptor));
#endif

	close_fd(descriptor);
	put_fd(descriptor);
		// the reference associated with the slot

	return FSSH_B_OK;
}
fssh_status_t
_kern_ioctl(int fd, uint32_t op, void *buffer, fssh_size_t length)
{
	struct file_descriptor *descriptor;
	int status;

	TRACE(("sys_ioctl: fd %d\n", fd));

	descriptor = get_fd(get_current_io_context(true), fd);
	if (descriptor == NULL)
		return FSSH_B_FILE_ERROR;

	if (descriptor->ops->fd_ioctl)
		status = descriptor->ops->fd_ioctl(descriptor, op, buffer, length);
	else
		status = FSSH_EOPNOTSUPP;

	put_fd(descriptor);
	return status;
}
fssh_status_t
_kern_rewind_dir(int fd)
{
	struct file_descriptor *descriptor;
	fssh_status_t status;

	TRACE(("sys_rewind_dir(fd = %d)\n", fd));

	descriptor = get_fd(get_current_io_context(true), fd);
	if (descriptor == NULL)
		return FSSH_B_FILE_ERROR;

	if (descriptor->ops->fd_rewind_dir)
		status = descriptor->ops->fd_rewind_dir(descriptor);
	else
		status = FSSH_EOPNOTSUPP;

	put_fd(descriptor);
	return status;
}
status_t
_user_rewind_dir(int fd)
{
	struct file_descriptor* descriptor;
	status_t status;

	TRACE(("user_rewind_dir(fd = %d)\n", fd));

	descriptor = get_fd(get_current_io_context(false), fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;

	if (descriptor->ops->fd_rewind_dir)
		status = descriptor->ops->fd_rewind_dir(descriptor);
	else
		status = EOPNOTSUPP;

	put_fd(descriptor);
	return status;
}
off_t
_user_seek(int fd, off_t pos, int seekType)
{
	syscall_64_bit_return_value();

	struct file_descriptor* descriptor;

	descriptor = get_fd(get_current_io_context(false), fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;
	if ((descriptor->open_mode & O_DISCONNECTED) != 0) {
		// release the reference acquired by get_fd()
		put_fd(descriptor);
		return B_FILE_ERROR;
	}

	TRACE(("user_seek(descriptor = %p)\n", descriptor));

	if (descriptor->ops->fd_seek != NULL)
		pos = descriptor->ops->fd_seek(descriptor, pos, seekType);
	else
		pos = ESPIPE;

	put_fd(descriptor);
	return pos;
}
static status_t
fd_ioctl(bool kernelFD, int fd, uint32 op, void* buffer, size_t length)
{
	struct file_descriptor* descriptor;
	int status;

	descriptor = get_fd(get_current_io_context(kernelFD), fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;
	if ((descriptor->open_mode & O_DISCONNECTED) != 0) {
		// release the reference acquired by get_fd()
		put_fd(descriptor);
		return B_FILE_ERROR;
	}

	if (descriptor->ops->fd_ioctl != NULL)
		status = descriptor->ops->fd_ioctl(descriptor, op, buffer, length);
	else
		status = B_DEV_INVALID_IOCTL;

	if (status == B_DEV_INVALID_IOCTL)
		status = ENOTTY;

	put_fd(descriptor);
	return status;
}
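// Hook-side counterpart (a sketch; example_fd_ioctl is a hypothetical
// fd_ioctl implementation): an unrecognized op is signalled with
// B_DEV_INVALID_IOCTL, which the wrapper above then translates into the
// POSIX ENOTTY.
static status_t
example_fd_ioctl(struct file_descriptor* descriptor, uint32 op, void* buffer,
	size_t length)
{
	switch (op) {
		// ... handle the ops this FD type understands ...
		default:
			return B_DEV_INVALID_IOCTL;
	}
}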
fssh_ssize_t
_kern_read_dir(int fd, struct fssh_dirent *buffer, fssh_size_t bufferSize,
	uint32_t maxCount)
{
	struct file_descriptor *descriptor;
	fssh_ssize_t retval;

	TRACE(("sys_read_dir(fd = %d, buffer = %p, bufferSize = %ld, count = "
		"%lu)\n", fd, buffer, bufferSize, maxCount));

	descriptor = get_fd(get_current_io_context(true), fd);
	if (descriptor == NULL)
		return FSSH_B_FILE_ERROR;

	if (descriptor->ops->fd_read_dir) {
		uint32_t count = maxCount;
		retval = descriptor->ops->fd_read_dir(descriptor, buffer, bufferSize,
			&count);
		if (retval >= 0)
			retval = count;
	} else
		retval = FSSH_EOPNOTSUPP;

	put_fd(descriptor);
	return retval;
}
inline file_descriptor*
SetTo(int fd, bool kernel, bool contextLocked = false)
{
	return SetTo(get_current_io_context(kernel), fd, contextLocked);
}
static status_t
common_close(int fd, bool kernel)
{
	return close_fd_index(get_current_io_context(kernel), fd);
}
status_t
select_fd(int32 fd, struct select_info* info, bool kernel)
{
	TRACE(("select_fd(fd = %ld, info = %p (%p), 0x%x)\n", fd, info,
		info->sync, info->selected_events));

	FDGetter fdGetter;
		// define before the context locker, so it will be destroyed after it

	io_context* context = get_current_io_context(kernel);
	MutexLocker locker(context->io_mutex);

	struct file_descriptor* descriptor = fdGetter.SetTo(context, fd, true);
	if (descriptor == NULL || (descriptor->open_mode & O_DISCONNECTED) != 0)
		return B_FILE_ERROR;

	uint16 eventsToSelect = info->selected_events & ~B_EVENT_INVALID;

	if (descriptor->ops->fd_select == NULL && eventsToSelect != 0) {
		// if the I/O subsystem doesn't support select(), we will
		// immediately notify the select call
		return notify_select_events(info, eventsToSelect);
	}

	// We need the FD to stay open while we're doing this, so no select()/
	// deselect() will be called on it after it is closed.
	atomic_add(&descriptor->open_count, 1);

	locker.Unlock();

	// select any events asked for
	uint32 selectedEvents = 0;

	for (uint16 event = 1; event < 16; event++) {
		if ((eventsToSelect & SELECT_FLAG(event)) != 0
			&& descriptor->ops->fd_select(descriptor, event,
				(selectsync*)info) == B_OK) {
			selectedEvents |= SELECT_FLAG(event);
		}
	}
	info->selected_events = selectedEvents
		| (info->selected_events & B_EVENT_INVALID);

	// Add the info to the IO context. Even if nothing has been selected -- we
	// always support B_EVENT_INVALID.
	locker.Lock();
	if (context->fds[fd] != descriptor) {
		// Someone close()d the index in the meantime. deselect() all
		// events.
		info->next = NULL;
		deselect_select_infos(descriptor, info, false);

		// Release our open reference of the descriptor.
		close_fd(descriptor);
		return B_FILE_ERROR;
	}

	// The FD index hasn't changed, so we add the select info to the table.
	info->next = context->select_infos[fd];
	context->select_infos[fd] = info;

	// As long as the info is in the list, we keep a reference to the sync
	// object.
	atomic_add(&info->sync->ref_count, 1);

	// Finally release our open reference. It is safe just to decrement,
	// since as long as the descriptor is associated with the slot,
	// someone else still has it open.
	atomic_add(&descriptor->open_count, -1);

	return B_OK;
}