/*
 * Shell command: flush all dirty filesystem buffers to disk.
 *
 * Both arguments are ignored; always reports success.
 */
static
int
cmd_sync(int nargs, char **args)
{
	(void)nargs;
	(void)args;

	vfs_sync();

	return 0;
}
/*
 * Shell command: sync the disks and power the system off.
 *
 * Does not return in normal operation; the trailing return value
 * exists only to satisfy the function signature.
 */
static
int
cmd_quit(int nargs, char **args)
{
	(void)nargs;
	(void)args;

	vfs_sync();
	sys_reboot(RB_POWEROFF);
	thread_exit();

	return 0;
}
/********************************************************************************************************* ** Function name: vfs_unmount ** Descriptions: 取消挂载文件系统 ** input parameters: path 目录 PATH ** param 参数 ** output parameters: NONE ** Returned value: 0 OR -1 *********************************************************************************************************/ int vfs_unmount(const char *path, const char *param) { mount_point_t *point; char *pathbuf; char *filepath; int ret; pathbuf = kmalloc(PATH_BUF_LEN, GFP_KERNEL); if (pathbuf == NULL) { seterrno(ENOMEM); return -1; } mutex_lock(&mount_point_lock, 0); point = vfs_mount_point_lookup2(pathbuf, &filepath, path); /* 查找挂载点 */ if (point == NULL) { mutex_unlock(&mount_point_lock); kfree(pathbuf); return -1; } if (point->fs->unmount == NULL) { mutex_unlock(&mount_point_lock); kfree(pathbuf); seterrno(ENOSYS); return -1; } if (atomic_read(&point->ref) != 0) { mutex_unlock(&mount_point_lock); kfree(pathbuf); seterrno(EBUSY); return -1; } vfs_sync(path); seterrno(0); ret = point->fs->unmount(point, param); /* 取消挂载文件系统 */ if (ret == 0) { if (point->dev != NULL) { atomic_dec(&point->dev->ref); } atomic_dec(&point->fs->ref); mount_point_remove(point); } mutex_unlock(&mount_point_lock); kfree(pathbuf); return ret; }
/*
 * Shell command: sync the disks and power the system off.
 *
 * With OPT_A2 enabled the calling thread exits after requesting the
 * reboot; the trailing return only satisfies the signature.
 */
static
int
cmd_quit(int nargs, char **args)
{
	(void)nargs;
	(void)args;

	vfs_sync();
	sys_reboot(RB_POWEROFF);
#if OPT_A2
	thread_exit(0);
#endif /* OPT_A2 */
	return 0;
}
/*
 * Per-client connection fibril of the VFS server.
 *
 * Answers the initial IPC_CONNECT_ME_TO request, then services
 * VFS_IN_* calls until the client hangs up (null method) or the
 * dialogue is ended by a VFS_IN_REGISTER request.
 */
static void vfs_connection(ipc_callid_t iid, ipc_call_t *icall, void *arg)
{
	bool done = false;

	/*
	 * The connection was opened via the IPC_CONNECT_ME_TO call.
	 * This call needs to be answered.
	 */
	async_answer_0(iid, EOK);

	while (!done) {
		ipc_call_t call;
		ipc_callid_t callid = async_get_call(&call);

		/* A null method means the client hung up. */
		if (!IPC_GET_IMETHOD(call))
			break;

		switch (IPC_GET_IMETHOD(call)) {
		case VFS_IN_REGISTER:
			vfs_register(callid, &call);
			/* Registration ends this connection's dialogue. */
			done = true;
			break;
		case VFS_IN_MOUNT:
			vfs_mount(callid, &call);
			break;
		case VFS_IN_UNMOUNT:
			vfs_unmount(callid, &call);
			break;
		case VFS_IN_OPEN:
			vfs_open(callid, &call);
			break;
		case VFS_IN_CLOSE:
			vfs_close(callid, &call);
			break;
		case VFS_IN_READ:
			vfs_read(callid, &call);
			break;
		case VFS_IN_WRITE:
			vfs_write(callid, &call);
			break;
		case VFS_IN_SEEK:
			vfs_seek(callid, &call);
			break;
		case VFS_IN_TRUNCATE:
			vfs_truncate(callid, &call);
			break;
		case VFS_IN_FSTAT:
			vfs_fstat(callid, &call);
			break;
		case VFS_IN_STAT:
			vfs_stat(callid, &call);
			break;
		case VFS_IN_MKDIR:
			vfs_mkdir(callid, &call);
			break;
		case VFS_IN_UNLINK:
			vfs_unlink(callid, &call);
			break;
		case VFS_IN_RENAME:
			vfs_rename(callid, &call);
			break;
		case VFS_IN_SYNC:
			vfs_sync(callid, &call);
			break;
		case VFS_IN_DUP:
			vfs_dup(callid, &call);
			break;
		case VFS_IN_WAIT_HANDLE:
			vfs_wait_handle(callid, &call);
			break;
		case VFS_IN_MTAB_GET:
			vfs_get_mtab(callid, &call);
			break;
		default:
			async_answer_0(callid, ENOTSUP);
			break;
		}
	}

	/*
	 * Open files for this client will be cleaned up when its last
	 * connection fibril terminates.
	 */
}
/*
 * panic - print an error message and bring the machine down.
 *
 * Never returns.  The static "evil" counter records how far a previous
 * panic got; if an operation performed while panicking itself panics,
 * the re-entrant call resumes at the *next* stage instead of repeating
 * the one that failed, which prevents infinite recursion.
 *
 * Stages: 1) disable interrupts, 2) stop other threads/CPUs,
 * 3) print the formatted message, 4) sync disks, 5) halt/reboot.
 */
void
panic(const char *fmt, ...)
{
	va_list ap;

	/*
	 * When we reach panic, the system is usually fairly screwed up.
	 * It's not entirely uncommon for anything else we try to do
	 * here to trigger more panics.
	 *
	 * This variable makes sure that if we try to do something here,
	 * and it causes another panic, *that* panic doesn't try again;
	 * trying again almost inevitably causes infinite recursion.
	 *
	 * This is not excessively paranoid - these things DO happen!
	 */
	static volatile int evil;

	if (evil == 0) {
		evil = 1;

		/*
		 * Not only do we not want to be interrupted while
		 * panicking, but we also want the console to be
		 * printing in polling mode so as not to do context
		 * switches. So turn interrupts off on this CPU.
		 */
		splhigh();
	}

	if (evil == 1) {
		evil = 2;

		/* Kill off other threads and halt other CPUs. */
		thread_panic();
	}

	if (evil == 2) {
		evil = 3;

		/* Print the message. */
		kprintf("panic: ");
		putch_prepare();
		va_start(ap, fmt);
		__vprintf(console_send, NULL, fmt, ap);
		va_end(ap);
		putch_complete();
	}

	if (evil == 3) {
		evil = 4;

		/* Try to sync the disks. */
		vfs_sync();
	}

	if (evil == 4) {
		evil = 5;

		/* Shut down or reboot the system. */
		mainbus_panic();
	}

	/*
	 * Last resort, just in case.
	 */
	for (;;);
}
/*
 * dr_suspend - quiesce the system ahead of a dynamic reconfiguration
 * operation.
 *
 * Walks through progressively deeper suspend states recorded in
 * srh->sr_suspend_state: stop user threads (DR_SRSTATE_USER), check
 * for unsafe devices unless SBD_FLAG_FORCE is set, sync file systems,
 * suspend the lock manager and device drivers (DR_SRSTATE_DRIVER),
 * and finally pause all CPUs with interrupts stopped
 * (DR_SRSTATE_FULL).  On any failure, dr_resume() is invoked to undo
 * the partial suspend before the error is returned.
 *
 * @srh: suspend/resume handle tracking progress and errors
 *
 * Returns DDI_SUCCESS on success or a DDI error code on failure.
 */
int
dr_suspend(dr_sr_handle_t *srh)
{
	dr_handle_t	*handle;
	int		force;
	int		dev_errs_idx;
	uint64_t	dev_errs[DR_MAX_ERR_INT];
	int		rc = DDI_SUCCESS;

	handle = srh->sr_dr_handlep;

	force = dr_cmd_flags(handle) & SBD_FLAG_FORCE;

	/*
	 * update the signature block
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_QUIESCE_INPROGRESS, SIGSUBST_NULL,
	    CPU->cpu_id);

	/* Prevent device-tree changes for the duration of the suspend. */
	i_ndi_block_device_tree_changes(&handle->h_ndi);

	prom_printf("\nDR: suspending user threads...\n");
	srh->sr_suspend_state = DR_SRSTATE_USER;
	if (((rc = dr_stop_user_threads(srh)) != DDI_SUCCESS) &&
	    dr_check_user_stop_result) {
		dr_resume(srh);
		return (rc);
	}

	if (!force) {
		struct dr_ref drc = {0};

		prom_printf("\nDR: checking devices...\n");
		dev_errs_idx = 0;

		drc.arr = dev_errs;
		drc.idx = &dev_errs_idx;
		drc.len = DR_MAX_ERR_INT;

		/*
		 * Since the root node can never go away, it
		 * doesn't have to be held.
		 */
		ddi_walk_devs(ddi_root_node(), dr_check_unsafe_major, &drc);
		if (dev_errs_idx) {
			/* Unsafe drivers found: record them and abort. */
			handle->h_err = drerr_int(ESBD_UNSAFE, dev_errs,
				dev_errs_idx, 1);
			dr_resume(srh);
			return (DDI_FAILURE);
		}
		PR_QR("done\n");
	} else {
		prom_printf("\nDR: dr_suspend invoked with force flag\n");
	}

#ifndef	SKIP_SYNC
	/*
	 * This sync swap out all user pages
	 */
	vfs_sync(SYNC_ALL);
#endif

	/*
	 * special treatment for lock manager
	 */
	lm_cprsuspend();

#ifndef	SKIP_SYNC
	/*
	 * sync the file system in case we never make it back
	 */
	sync();
#endif

	/*
	 * now suspend drivers
	 */
	prom_printf("DR: suspending drivers...\n");
	srh->sr_suspend_state = DR_SRSTATE_DRIVER;
	srh->sr_err_idx = 0;
	/* No parent to hold busy */
	if ((rc = dr_suspend_devices(ddi_root_node(), srh)) != DDI_SUCCESS) {
		if (srh->sr_err_idx && srh->sr_dr_handlep) {
			(srh->sr_dr_handlep)->h_err = drerr_int(ESBD_SUSPEND,
				srh->sr_err_ints, srh->sr_err_idx, 1);
		}
		dr_resume(srh);
		return (rc);
	}

	drmach_suspend_last();

	/*
	 * finally, grab all cpus
	 */
	srh->sr_suspend_state = DR_SRSTATE_FULL;

	/*
	 * if watchdog was activated, disable it
	 */
	if (watchdog_activated) {
		mutex_enter(&tod_lock);
		/*
		 * NOTE(review): the return value is discarded here, while
		 * sbdp_suspend() saves it in saved_watchdog_seconds for use
		 * at resume time -- confirm this asymmetry is intentional.
		 */
		tod_ops.tod_clear_watchdog_timer();
		mutex_exit(&tod_lock);
		srh->sr_flags |= SR_FLAG_WATCHDOG;
	} else {
		srh->sr_flags &= ~(SR_FLAG_WATCHDOG);
	}

	/*
	 * Update the signature block.
	 * This must be done before cpus are paused, since on Starcat the
	 * cpu signature update aquires an adaptive mutex in the iosram driver.
	 * Blocking with cpus paused can lead to deadlock.
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_QUIESCED, SIGSUBST_NULL, CPU->cpu_id);

	mutex_enter(&cpu_lock);
	pause_cpus(NULL);
	dr_stop_intr();

	return (rc);
}
/*
 * sbdp_suspend - quiesce the system for a safari board DR operation.
 *
 * Mirrors dr_suspend(): stop user threads (SBDP_SRSTATE_USER), sync
 * file systems, suspend the lock manager and device drivers
 * (SBDP_SRSTATE_DRIVER), then pause all CPUs with interrupts stopped
 * (SBDP_SRSTATE_FULL).  With SBDP_IOCTL_FLAG_FORCE set, the unsafe-
 * driver check is skipped (only a debug message is logged here).  On
 * failure, sbdp_resume() unwinds the partial suspend.
 *
 * @srh: suspend/resume handle tracking state and flags (may carry the
 *       force flag); assumed non-NULL past the initial flag test --
 *       TODO confirm callers never pass NULL.
 *
 * Returns DDI_SUCCESS on success or a DDI error code on failure.
 */
int
sbdp_suspend(sbdp_sr_handle_t *srh)
{
	int force;
	int rc = DDI_SUCCESS;

	force = (srh && (srh->sr_flags & SBDP_IOCTL_FLAG_FORCE));

	/*
	 * if no force flag, check for unsafe drivers
	 */
	if (force) {
		SBDP_DBG_QR("\nsbdp_suspend invoked with force flag");
	}

	/*
	 * update the signature block
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_QUIESCE_INPROGRESS, SIGSUBST_NULL,
	    CPU->cpu_id);

	/*
	 * first, stop all user threads
	 */
	SBDP_DBG_QR("SBDP: suspending user threads...\n");
	SR_SET_STATE(srh, SBDP_SRSTATE_USER);
	if (((rc = sbdp_stop_user_threads(srh)) != DDI_SUCCESS) &&
	    sbdp_check_user_stop_result) {
		sbdp_resume(srh);
		return (rc);
	}

#ifndef	SKIP_SYNC
	/*
	 * This sync swap out all user pages
	 */
	vfs_sync(SYNC_ALL);
#endif

	/*
	 * special treatment for lock manager
	 */
	lm_cprsuspend();

#ifndef	SKIP_SYNC
	/*
	 * sync the file system in case we never make it back
	 */
	sync();
#endif

	/*
	 * now suspend drivers
	 */
	SBDP_DBG_QR("SBDP: suspending drivers...\n");
	SR_SET_STATE(srh, SBDP_SRSTATE_DRIVER);

	/*
	 * Root node doesn't have to be held in any way.
	 */
	if ((rc = sbdp_suspend_devices(ddi_root_node(), srh)) != DDI_SUCCESS) {
		sbdp_resume(srh);
		return (rc);
	}

	/*
	 * finally, grab all cpus
	 */
	SR_SET_STATE(srh, SBDP_SRSTATE_FULL);

	/*
	 * if watchdog was activated, disable it
	 */
	if (watchdog_activated) {
		mutex_enter(&tod_lock);
		/* Save the remaining seconds so resume can re-arm it. */
		saved_watchdog_seconds = tod_ops.tod_clear_watchdog_timer();
		mutex_exit(&tod_lock);
		SR_SET_FLAG(srh, SR_FLAG_WATCHDOG);
	} else {
		SR_CLEAR_FLAG(srh, SR_FLAG_WATCHDOG);
	}

	mutex_enter(&cpu_lock);
	pause_cpus(NULL);
	sbdp_stop_intr();

	/*
	 * update the signature block
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_QUIESCED, SIGSUBST_NULL, CPU->cpu_id);

	return (rc);
}