asmlinkage int vprintk(const char *fmt, va_list args)
{
	int printed_len = 0;
	int current_log_level = default_message_loglevel;
	unsigned long flags;
	int this_cpu;
	char *p;

	boot_delay_msec();

	preempt_disable();
	/* This stops the holder of console_sem just where we want him */
	raw_local_irq_save(flags);
	this_cpu = smp_processor_id();

	/*
	 * Ouch, printk recursed into itself!
	 */
	if (unlikely(printk_cpu == this_cpu)) {
		/*
		 * If a crash is occurring during printk() on this CPU,
		 * then try to get the crash message out but make sure
		 * we can't deadlock. Otherwise just return to avoid the
		 * recursion - but flag it so that it can be printed at
		 * the next appropriate moment:
		 */
		if (!oops_in_progress) {
			recursion_bug = 1;
			goto out_restore_irqs;
		}
		zap_locks();
	}

	lockdep_off();
	spin_lock(&logbuf_lock);
	printk_cpu = this_cpu;

	if (recursion_bug) {
		recursion_bug = 0;
		strcpy(printk_buf, recursion_bug_msg);
		/*
		 * use strlen(), not sizeof(): sizeof() counts the trailing
		 * NUL, which would stay embedded in the buffer and make the
		 * copy loop below discard the appended message
		 */
		printed_len = strlen(recursion_bug_msg);
	}
	/* Emit the output into the temporary buffer */
	printed_len += vscnprintf(printk_buf + printed_len,
				  sizeof(printk_buf) - printed_len, fmt, args);

#ifdef CONFIG_DEBUG_LL
	printascii(printk_buf);
#endif

	/*
	 * Copy the output into log_buf. If the caller didn't provide
	 * appropriate log level tags, we insert them here.
	 */
	for (p = printk_buf; *p; p++) {
		if (new_text_line) {
			/* If a token, set current_log_level and skip over */
			if (p[0] == '<' && p[1] >= '0' && p[1] <= '7' &&
			    p[2] == '>') {
				current_log_level = p[1] - '0';
				p += 3;
				printed_len -= 3;
			}

			/* Always output the token */
			emit_log_char('<');
			emit_log_char(current_log_level + '0');
			emit_log_char('>');
			printed_len += 3;
			new_text_line = 0;

			if (printk_time) {
				/* Follow the token with the time */
				char tbuf[50], *tp;
				unsigned tlen;
				unsigned long long t;
				unsigned long nanosec_rem;

				t = cpu_clock(printk_cpu);
				nanosec_rem = do_div(t, 1000000000);
				tlen = sprintf(tbuf, "[%5lu.%06lu] ",
					       (unsigned long) t,
					       nanosec_rem / 1000);

				for (tp = tbuf; tp < tbuf + tlen; tp++)
					emit_log_char(*tp);
				printed_len += tlen;
			}

			if (!*p)
				break;
		}

		emit_log_char(*p);
		if (*p == '\n')
			new_text_line = 1;
	}

	/*
	 * Try to acquire and then immediately release the
	 * console semaphore. The release will do all the
	 * actual magic (print out buffers, wake up klogd,
	 * etc).
	 *
	 * The acquire_console_semaphore_for_printk() function
	 * will release 'logbuf_lock' regardless of whether it
	 * actually gets the semaphore or not.
	 */
	if (acquire_console_semaphore_for_printk(this_cpu))
		release_console_sem();

	lockdep_on();
out_restore_irqs:
	raw_local_irq_restore(flags);

	preempt_enable();
	return printed_len;
}
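/*
 * For reference: callers normally reach vprintk() through the printk()
 * varargs wrapper. A minimal sketch of that wrapper, matching kernels of
 * this era (shown for illustration, not copied from this source tree):
 */
asmlinkage int printk(const char *fmt, ...)
{
	va_list args;
	int r;

	va_start(args, fmt);
	r = vprintk(fmt, args);
	va_end(args);

	return r;
}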
void spm_go_to_sodi(u32 spm_flags, u32 spm_data)
{
	struct wake_status wakesta;
	unsigned long flags;
	struct mtk_irq_mask mask;
	wake_reason_t wr = WR_NONE;
	struct pcm_desc *pcmdesc = __spm_sodi.pcmdesc;
	struct pwr_ctrl *pwrctrl = __spm_sodi.pwrctrl;

#if SPM_AEE_RR_REC
	aee_rr_rec_sodi_val(1 << SPM_SODI_ENTER);
#endif

#if defined(CONFIG_ARM_PSCI) || defined(CONFIG_MTK_PSCI)
	spm_flags &= ~SPM_DISABLE_ATF_ABORT;
#else
	spm_flags |= SPM_DISABLE_ATF_ABORT;
#endif

	if (gSpm_SODI_mempll_pwr_mode == 1)
		spm_flags |= SPM_MEMPLL_CG_EN;	/* MEMPLL CG mode */
	else
		spm_flags &= ~SPM_MEMPLL_CG_EN;	/* DDRPHY power-down mode */

	set_pwrctrl_pcm_flags(pwrctrl, spm_flags);

	/* if Vcore DVFS is disabled, force-disable SODI's internal Vcore DVS */
	if (pwrctrl->pcm_flags_cust == 0) {
		if ((pwrctrl->pcm_flags & SPM_VCORE_DVFS_EN) == 0)
			pwrctrl->pcm_flags |= SPM_VCORE_DVS_EVENT_DIS;
	}

	/* SODI will not decrease the Vcore voltage in HPM mode */
	if ((pwrctrl->pcm_flags & SPM_VCORE_DVS_EVENT_DIS) == 0) {
		if (get_ddr_khz() != FDDR_S1_KHZ) {
#if SPM_AEE_RR_REC
			aee_rr_rec_sodi_val(aee_rr_curr_sodi_val() |
					    (1 << SPM_SODI_VCORE_HPM));
#endif
			/* printk("SODI: get_ddr_khz() = %d\n", get_ddr_khz()); */
		} else {
#if SPM_AEE_RR_REC
			aee_rr_rec_sodi_val(aee_rr_curr_sodi_val() |
					    (1 << SPM_SODI_VCORE_LPM));
#endif
		}
	}

	/* enable APxGPT timer */
	soidle_before_wfi(0);

	lockdep_off();
	spin_lock_irqsave(&__spm_lock, flags);
	mt_irq_mask_all(&mask);
	mt_irq_unmask_for_sleep(SPM_IRQ0_ID);
	mt_cirq_clone_gic();
	mt_cirq_enable();

#if SPM_AEE_RR_REC
	aee_rr_rec_sodi_val(aee_rr_curr_sodi_val() | (1 << SPM_SODI_ENTER_SPM_FLOW));
#endif

	__spm_reset_and_init_pcm(pcmdesc);

	/*
	 * When the command queue is in shut-down mode, SPM will hang if it
	 * tries to access the command-queue status. The patch below let the
	 * SODI driver notify SPM whether the command queue is shut down, to
	 * avoid that hang. Display now notifies SPM of the command-queue
	 * state automatically, so this code is no longer needed.
	 */
#if 0
	/* check GCE */
	if (clock_is_on(MT_CG_INFRA_GCE))
		pwrctrl->pcm_flags &= ~SPM_DDR_HIGH_SPEED;
	else
		pwrctrl->pcm_flags |= SPM_DDR_HIGH_SPEED;
#endif

	__spm_kick_im_to_fetch(pcmdesc);
	__spm_init_pcm_register();
	__spm_init_event_vector(pcmdesc);

	if (pwrctrl->pcm_flags_cust == 0) {
		/*
		 * Display sets SPM_PCM_SRC_REQ[0] = 1'b1 to force DRAM not
		 * to enter self-refresh mode.
		 */
		if (spm_read(SPM_PCM_SRC_REQ) & 0x00000001)
			pwrctrl->pcm_apsrc_req = 1;
		else
			pwrctrl->pcm_apsrc_req = 0;
	}

	__spm_set_power_control(pwrctrl);
	__spm_set_wakeup_event(pwrctrl);

#if SODI_DVT_PCM_TIMER_DISABLE
	/*
	 * The PCM timer was enabled by __spm_set_wakeup_event() above;
	 * disable it here.
	 */
	spm_write(SPM_PCM_CON1, spm_read(SPM_PCM_CON1) & ~CON1_PCM_TIMER_EN);
#endif

	__spm_kick_pcm_to_run(pwrctrl);

	spm_sodi_pre_process();

#if SPM_SODI_DUMP_REGS
	printk("============SODI Before============\n");
	spm_sodi_dump_regs();	/* dump debug info */
#endif

#if SPM_AEE_RR_REC
	aee_rr_rec_sodi_val(aee_rr_curr_sodi_val() | (1 << SPM_SODI_ENTER_WFI));
#endif

	spm_trigger_wfi_for_sodi(pwrctrl);

#if SPM_AEE_RR_REC
	aee_rr_rec_sodi_val(aee_rr_curr_sodi_val() | (1 << SPM_SODI_LEAVE_WFI));
#endif

#if SPM_SODI_DUMP_REGS
	printk("============SODI After=============\n");
	spm_sodi_dump_regs();	/* dump debug info */
#endif

	spm_sodi_post_process();

	__spm_get_wakeup_status(&wakesta);

	sodi_debug("emi-selfrefresh cnt = %d, pcm_flag = 0x%x, SPM_PCM_RESERVE2 = 0x%x, %s\n",
		   spm_read(SPM_PCM_PASR_DPD_3), spm_read(SPM_PCM_FLAGS),
		   spm_read(SPM_PCM_RESERVE2), pcmdesc->version);

	__spm_clean_after_wakeup();

	wr = __spm_output_wake_reason(&wakesta, pcmdesc, false);
	if (wr == WR_PCM_ASSERT) {
		sodi_err("PCM ASSERT AT %u (%s), r13 = 0x%x, debug_flag = 0x%x\n",
			 wakesta.assert_pc, pcmdesc->version, wakesta.r13,
			 wakesta.debug_flag);
	}

#if SPM_AEE_RR_REC
	aee_rr_rec_sodi_val(aee_rr_curr_sodi_val() | (1 << SPM_SODI_LEAVE_SPM_FLOW));
#endif

	mt_cirq_flush();
	mt_cirq_disable();
	mt_irq_mask_restore(&mask);
	spin_unlock_irqrestore(&__spm_lock, flags);
	lockdep_on();

	/* stop the APxGPT timer and enable the core0 local timer */
	soidle_after_wfi(0);

#if SODI_DVT_SPM_MEM_RW_TEST
	{
		static u32 magic_init = 0;
		int i = 0;

		if (magic_init == 0) {
			magic_init++;
			printk("magicNumArray:0x%p", magicArray);
		}
		for (i = 0; i < 16; i++) {
			if (magicArray[i] != SODI_DVT_MAGIC_NUM) {
				printk("Error: sodi magic number no match!!!");
				ASSERT(0);
			}
		}
		if (i >= 16)
			printk("SODI_DVT_SPM_MEM_RW_TEST pass (count = %d)\n",
			       magic_init);
	}
#endif

#if SPM_AEE_RR_REC
	aee_rr_rec_sodi_val(0);
#endif
}
wake_reason_t spm_go_to_dpidle(u32 spm_flags, u32 spm_data)
{
	struct wake_status wakesta;
	unsigned long flags;
	struct mtk_irq_mask mask;
	wake_reason_t wr = WR_NONE;
	struct pcm_desc *pcmdesc = __spm_dpidle.pcmdesc;
	struct pwr_ctrl *pwrctrl = __spm_dpidle.pwrctrl;

#if SPM_AEE_RR_REC
	aee_rr_rec_deepidle_val(1 << SPM_DEEPIDLE_ENTER);
#endif

	set_pwrctrl_pcm_flags(pwrctrl, spm_flags);

	/* pwrctrl->timer_val = 1 * 32768; */

	spm_dpidle_before_wfi();

	lockdep_off();
	spin_lock_irqsave(&__spm_lock, flags);
	mt_irq_mask_all(&mask);
	mt_irq_unmask_for_sleep(SPM_IRQ0_ID);
	mt_cirq_clone_gic();
	mt_cirq_enable();

#if SPM_AEE_RR_REC
	aee_rr_rec_deepidle_val(aee_rr_curr_deepidle_val() |
				(1 << SPM_DEEPIDLE_ENTER_UART_SLEEP));
#endif

	if (request_uart_to_sleep()) {
		wr = WR_UART_BUSY;
		goto RESTORE_IRQ;
	}

	__spm_reset_and_init_pcm(pcmdesc);
	__spm_kick_im_to_fetch(pcmdesc);
	__spm_init_pcm_register();
	__spm_init_event_vector(pcmdesc);
	__spm_set_power_control(pwrctrl);
	__spm_set_wakeup_event(pwrctrl);

	spm_dpidle_pre_process();

	__spm_kick_pcm_to_run(pwrctrl);

#if SPM_AEE_RR_REC
	aee_rr_rec_deepidle_val(aee_rr_curr_deepidle_val() |
				(1 << SPM_DEEPIDLE_ENTER_WFI));
#endif

#ifdef SPM_DEEPIDLE_PROFILE_TIME
	gpt_get_cnt(SPM_PROFILE_APXGPT, &dpidle_profile[1]);
#endif

	spm_trigger_wfi_for_dpidle(pwrctrl);

#ifdef SPM_DEEPIDLE_PROFILE_TIME
	gpt_get_cnt(SPM_PROFILE_APXGPT, &dpidle_profile[2]);
#endif

#if SPM_AEE_RR_REC
	aee_rr_rec_deepidle_val(aee_rr_curr_deepidle_val() |
				(1 << SPM_DEEPIDLE_LEAVE_WFI));
#endif

	spm_dpidle_post_process();

	__spm_get_wakeup_status(&wakesta);

	__spm_clean_after_wakeup();

#if SPM_AEE_RR_REC
	aee_rr_rec_deepidle_val(aee_rr_curr_deepidle_val() |
				(1 << SPM_DEEPIDLE_ENTER_UART_AWAKE));
#endif

	request_uart_to_wakeup();

	wr = __spm_output_wake_reason(&wakesta, pcmdesc, false);

RESTORE_IRQ:
	mt_cirq_flush();
	mt_cirq_disable();
	mt_irq_mask_restore(&mask);
	spin_unlock_irqrestore(&__spm_lock, flags);
	lockdep_on();

	spm_dpidle_after_wfi();

#if SPM_AEE_RR_REC
	aee_rr_rec_deepidle_val(0);
#endif

	return wr;
}
static void sysrq_handle_reboot(int key)
{
	lockdep_off();
	local_irq_enable();
	emergency_restart();
}
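/*
 * A minimal sketch of how a handler like this gets wired up, assuming the
 * classic sysrq API of this era (struct sysrq_key_op and
 * register_sysrq_key()); the op values and init function are illustrative:
 */
static struct sysrq_key_op sysrq_reboot_op = {
	.handler	= sysrq_handle_reboot,
	.help_msg	= "reboot(b)",
	.action_msg	= "Resetting",
	.enable_mask	= SYSRQ_ENABLE_BOOT,
};

static int __init reboot_sysrq_init(void)
{
	/* bind the handler to the 'b' key (echo b > /proc/sysrq-trigger) */
	return register_sysrq_key('b', &sysrq_reboot_op);
}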
int vfsub_rename(struct inode *src_dir, struct dentry *src_dentry,
		 struct inode *dir, struct path *path,
		 struct inode **delegated_inode)
{
	int err;
	struct path tmp = {
		.mnt	= path->mnt
	};
	struct dentry *d;

	IMustLock(dir);
	IMustLock(src_dir);

	d = path->dentry;
	path->dentry = d->d_parent;
	tmp.dentry = src_dentry->d_parent;
	err = security_path_rename(&tmp, src_dentry, path, d, /*flags*/0);
	path->dentry = d;
	if (unlikely(err))
		goto out;

	lockdep_off();
	err = vfs_rename(src_dir, src_dentry, dir, path->dentry,
			 delegated_inode, /*flags*/0);
	lockdep_on();
	if (!err) {
		int did;

		tmp.dentry = d->d_parent;
		vfsub_update_h_iattr(&tmp, &did);
		if (did) {
			tmp.dentry = src_dentry;
			vfsub_update_h_iattr(&tmp, /*did*/NULL);
			tmp.dentry = src_dentry->d_parent;
			vfsub_update_h_iattr(&tmp, /*did*/NULL);
		}
		/*ignore*/
	}

out:
	return err;
}

int vfsub_mkdir(struct inode *dir, struct path *path, int mode)
{
	int err;
	struct dentry *d;

	IMustLock(dir);

	d = path->dentry;
	path->dentry = d->d_parent;
	err = security_path_mkdir(path, d, mode);
	path->dentry = d;
	if (unlikely(err))
		goto out;

	lockdep_off();
	err = vfs_mkdir(dir, path->dentry, mode);
	lockdep_on();
	if (!err) {
		struct path tmp = *path;
		int did;

		vfsub_update_h_iattr(&tmp, &did);
		if (did) {
			tmp.dentry = path->dentry->d_parent;
			vfsub_update_h_iattr(&tmp, /*did*/NULL);
		}
		/*ignore*/
	}

out:
	return err;
}
static int __copyup_reg_data(struct dentry *dentry,
			     struct dentry *new_lower_dentry, int new_bindex,
			     struct dentry *old_lower_dentry, int old_bindex,
			     struct file **copyup_file, loff_t len)
{
	struct super_block *sb = dentry->d_sb;
	struct file *input_file;
	struct file *output_file;
	struct vfsmount *output_mnt;
	mm_segment_t old_fs;
	char *buf = NULL;
	ssize_t read_bytes, write_bytes;
	loff_t size;
	int err = 0;

	/* open old file */
	unionfs_mntget(dentry, old_bindex);
	branchget(sb, old_bindex);
	/* dentry_open calls dput and mntput if it returns an error */
	input_file = dentry_open(old_lower_dentry,
				 unionfs_lower_mnt_idx(dentry, old_bindex),
				 O_RDONLY | O_LARGEFILE, current_cred());
	if (IS_ERR(input_file)) {
		dput(old_lower_dentry);
		err = PTR_ERR(input_file);
		goto out;
	}
	if (unlikely(!input_file->f_op || !input_file->f_op->read)) {
		err = -EINVAL;
		goto out_close_in;
	}

	/* open new file */
	dget(new_lower_dentry);
	output_mnt = unionfs_mntget(sb->s_root, new_bindex);
	branchget(sb, new_bindex);
	output_file = dentry_open(new_lower_dentry, output_mnt,
				  O_RDWR | O_LARGEFILE, current_cred());
	if (IS_ERR(output_file)) {
		err = PTR_ERR(output_file);
		goto out_close_in2;
	}
	if (unlikely(!output_file->f_op || !output_file->f_op->write)) {
		err = -EINVAL;
		goto out_close_out;
	}

	/* allocating a buffer */
	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (unlikely(!buf)) {
		err = -ENOMEM;
		goto out_close_out;
	}

	input_file->f_pos = 0;
	output_file->f_pos = 0;

	old_fs = get_fs();
	set_fs(KERNEL_DS);

	size = len;
	err = 0;
	do {
		if (len >= PAGE_SIZE)
			size = PAGE_SIZE;
		else if ((len < PAGE_SIZE) && (len > 0))
			size = len;

		len -= PAGE_SIZE;

		read_bytes =
			input_file->f_op->read(input_file,
					       (char __user *)buf, size,
					       &input_file->f_pos);
		if (read_bytes <= 0) {
			err = read_bytes;
			break;
		}

		/* see Documentation/filesystems/unionfs/issues.txt */
		lockdep_off();
		write_bytes =
			output_file->f_op->write(output_file,
						 (char __user *)buf,
						 read_bytes,
						 &output_file->f_pos);
		lockdep_on();
		if ((write_bytes < 0) || (write_bytes < read_bytes)) {
			err = write_bytes;
			break;
		}
	} while ((read_bytes > 0) && (len > 0));

	set_fs(old_fs);

	kfree(buf);

	if (!err)
		err = output_file->f_op->fsync(output_file,
					       new_lower_dentry, 0);
	if (err)
		goto out_close_out;

	if (copyup_file) {
		*copyup_file = output_file;
		goto out_close_in;
	}

out_close_out:
	fput(output_file);

out_close_in2:
	branchput(sb, new_bindex);

out_close_in:
	fput(input_file);

out:
	branchput(sb, old_bindex);

	return err;
}
/*
 * Basically stolen from the Linux kernel's printk.
 * TODO: handle log levels.
 */
int log_vprintk(const char *levelstr, const char *func, const char *fmt,
		va_list args)
{
	int printed_len = 0;
	int this_cpu;
	unsigned long flags;
	char *p;

	preempt_disable();
	this_cpu = smp_processor_id();

	lockdep_off();
	spin_lock_irqsave(&logbuf_lock, flags);
	printk_cpu = this_cpu;

	/* Emit the output into the temporary buffer */
	printed_len += vscnprintf(printk_buf + printed_len,
				  sizeof(printk_buf) - printed_len, fmt, args);

	p = printk_buf;

	/*
	 * Copy the output into log_buf. If the caller didn't provide
	 * appropriate log level tags, we insert them here.
	 */
	for (; *p; p++) {
		if (new_text_line) {
			const char *lp;

			new_text_line = 0;

			if (log_time) {
				/* Follow the token with the time */
				char tbuf[50], *tp;
				unsigned tlen;
				unsigned long long t;
				unsigned long nanosec_rem;

				t = cpu_clock(printk_cpu);
				nanosec_rem = do_div(t, 1000000000);
				tlen = sprintf(tbuf, "[%5lu.%06lu] ",
					       (unsigned long) t,
					       nanosec_rem / 1000);

				for (tp = tbuf; tp < tbuf + tlen; tp++)
					emit_log_char(*tp);
				printed_len += tlen;
			}

			/* Emit log level */
			emit_log_char('[');
			printed_len++;
			for (lp = levelstr; *lp != '\0'; lp++) {
				emit_log_char(*lp);
				printed_len++;
			}
			emit_log_char(']');
			printed_len++;

			for (lp = func; *lp != '\0'; lp++) {
				emit_log_char(*lp);
				printed_len++;
			}
			emit_log_char(':');
			emit_log_char(' ');
			printed_len += 2;

			if (!*p)
				break;
		}

		emit_log_char(*p);
		if (*p == '\n')
			new_text_line = 1;
	}

	wake_up_interruptible(&log_wait);

	spin_unlock_irqrestore(&logbuf_lock, flags);
	lockdep_on();
	preempt_enable();

	return printed_len;
}
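/*
 * A hypothetical convenience wrapper around log_vprintk(), shown only to
 * illustrate the intended call pattern; the name log_printk and the "INFO"
 * level string below are illustrative, not from the original source:
 */
int log_printk(const char *levelstr, const char *func, const char *fmt, ...)
{
	va_list args;
	int r;

	va_start(args, fmt);
	r = log_vprintk(levelstr, func, fmt, args);
	va_end(args);

	return r;
}

/* usage: log_printk("INFO", __func__, "booted in %d ms\n", ms); */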
void spm_go_to_sodi(u32 spm_flags, u32 spm_data)
{
	struct wake_status wakesta;
	unsigned long flags;
	struct mtk_irq_mask mask;
	wake_reason_t wr = WR_NONE;
	struct pcm_desc *pcmdesc = __spm_sodi.pcmdesc;
	struct pwr_ctrl *pwrctrl = __spm_sodi.pwrctrl;

#if SPM_AEE_RR_REC
	aee_rr_rec_sodi_val(1 << SPM_SODI_ENTER);
#endif

	set_pwrctrl_pcm_flags(pwrctrl, spm_flags);

	/* set PMIC WRAP table for deepidle power control */
	mt_cpufreq_set_pmic_phase(PMIC_WRAP_PHASE_SODI);

	soidle_before_wfi(0);

	lockdep_off();
	spin_lock_irqsave(&__spm_lock, flags);
	mt_irq_mask_all(&mask);
	mt_irq_unmask_for_sleep(SPM_IRQ0_ID /*MT_SPM_IRQ_ID*/);
	mt_cirq_clone_gic();
	mt_cirq_enable();

#if SPM_AEE_RR_REC
	aee_rr_rec_sodi_val(aee_rr_curr_sodi_val() | (1 << SPM_SODI_ENTER_SPM_FLOW));
#endif

	__spm_reset_and_init_pcm(pcmdesc);

#if 0
	/* 0: mempll shutdown mode; 1: cg mode */
	gSpm_SODI_mempll_pwr_mode ?
		(pwrctrl->pcm_flags |= SPM_MEMPLL_CPU) :
		(pwrctrl->pcm_flags &= ~SPM_MEMPLL_CPU);
#endif

	__spm_kick_im_to_fetch(pcmdesc);
	__spm_init_pcm_register();
	__spm_init_event_vector(pcmdesc);

	/* set pcm_apsrc_req to 1 if 10006b0c[0] is 1 */
	if ((spm_read(SPM_PCM_SRC_REQ) & 1) || pwrctrl->pcm_apsrc_req)
		pwrctrl->pcm_apsrc_req = 1;
	else
		pwrctrl->pcm_apsrc_req = 0;

	__spm_set_power_control(pwrctrl);
	__spm_set_wakeup_event(pwrctrl);

	/* set pcm_flags[18] to 1 if 10006b08[7] is 1 */
	if ((spm_read(SPM_PCM_FLAGS) & SPM_MEMPLL_RESET) ||
	    gSpm_SODI_mempll_pwr_mode ||
	    (pwrctrl->pcm_flags_cust & SPM_MEMPLL_CPU))
		pwrctrl->pcm_flags |= SPM_MEMPLL_CPU;
	else
		pwrctrl->pcm_flags &= ~SPM_MEMPLL_CPU;

	__spm_kick_pcm_to_run(pwrctrl);

#if SPM_AEE_RR_REC
	aee_rr_rec_sodi_val(aee_rr_curr_sodi_val() | (1 << SPM_SODI_ENTER_WFI));
#endif

	spm_trigger_wfi_for_sodi(pwrctrl);

#if SPM_AEE_RR_REC
	aee_rr_rec_sodi_val(aee_rr_curr_sodi_val() | (1 << SPM_SODI_LEAVE_WFI));
#endif

	__spm_get_wakeup_status(&wakesta);

	__spm_clean_after_wakeup();

	wr = __spm_output_wake_reason(&wakesta, pcmdesc, false);
	/* for test */
	/* wr = __spm_output_wake_reason(&wakesta, pcmdesc, true); */

#if SPM_AEE_RR_REC
	aee_rr_rec_sodi_val(aee_rr_curr_sodi_val() | (1 << SPM_SODI_LEAVE_SPM_FLOW));
#endif

	mt_cirq_flush();
	mt_cirq_disable();
	mt_irq_mask_restore(&mask);
	spin_unlock_irqrestore(&__spm_lock, flags);
	lockdep_on();

	soidle_after_wfi(0);

	/* set PMIC WRAP table for normal power control */
	mt_cpufreq_set_pmic_phase(PMIC_WRAP_PHASE_NORMAL);

#if SPM_AEE_RR_REC
	aee_rr_rec_sodi_val(0);
#endif
	/* return wr; */
}
struct dentry *vfsub_lookup_hash(struct nameidata *nd)
{
	struct path path = {
		.mnt = nd->path.mnt
	};

	IMustLock(nd->path.dentry->d_inode);

	path.dentry = lookup_hash(nd);
	if (IS_ERR(path.dentry))
		goto out;
	if (path.dentry->d_inode)
		vfsub_update_h_iattr(&path, /*did*/NULL); /*ignore*/

out:
	AuTraceErrPtr(path.dentry);
	return path.dentry;
}

/*
 * this is "VFS:__lookup_one_len()" which was removed and merged into
 * VFS:lookup_one_len() by the commit.
 *	6a96ba5 2011-03-14 kill __lookup_one_len()
 * this function should always be equivalent to the corresponding part in
 * VFS:lookup_one_len().
 */
int vfsub_name_hash(const char *name, struct qstr *this, int len)
{
	unsigned long hash;
	unsigned int c;

	this->name = name;
	this->len = len;
	if (!len)
		return -EACCES;

	hash = init_name_hash();
	while (len--) {
		c = *(const unsigned char *)name++;
		if (c == '/' || c == '\0')
			return -EACCES;
		hash = partial_name_hash(c, hash);
	}
	this->hash = end_name_hash(hash);
	return 0;
}

/* ---------------------------------------------------------------------- */

struct dentry *vfsub_lock_rename(struct dentry *d1, struct au_hinode *hdir1,
				 struct dentry *d2, struct au_hinode *hdir2)
{
	struct dentry *d;

	lockdep_off();
	d = lock_rename(d1, d2);
	lockdep_on();
	au_hn_suspend(hdir1);
	if (hdir1 != hdir2)
		au_hn_suspend(hdir2);

	return d;
}

void vfsub_unlock_rename(struct dentry *d1, struct au_hinode *hdir1,
			 struct dentry *d2, struct au_hinode *hdir2)
{
	au_hn_resume(hdir1);
	if (hdir1 != hdir2)
		au_hn_resume(hdir2);
	lockdep_off();
	unlock_rename(d1, d2);
	lockdep_on();
}
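/*
 * A minimal usage sketch for vfsub_name_hash(): build a qstr for a single
 * path component, relying on it to reject empty names and names containing
 * '/' or NUL with -EACCES. The caller and the "foo" component below are
 * illustrative only:
 */
static int example_build_qstr(struct qstr *q)
{
	static const char comp[] = "foo";

	/* fills q->name, q->len and q->hash on success */
	return vfsub_name_hash(comp, q, sizeof(comp) - 1);
}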
/*
 * release all lower object references & free the file info structure
 *
 * No need to grab sb info's rwsem.
 */
int unionfs_file_release(struct inode *inode, struct file *file)
{
	struct file *lower_file = NULL;
	struct unionfs_file_info *fileinfo;
	struct unionfs_inode_info *inodeinfo;
	struct super_block *sb = inode->i_sb;
	struct dentry *dentry = file->f_path.dentry;
	struct dentry *parent;
	int bindex, bstart, bend;
	int fgen, err = 0;

	/*
	 * Since mm/memory.c:might_fault() (under PROVE_LOCKING) was
	 * modified in 2.6.29-rc1 to call might_lock_read on mmap_sem, it
	 * has been causing false positives in file system stacking layers.
	 * In particular, our ->mmap is called after sys_mmap2 already holds
	 * mmap_sem, then we lock our own mutexes; but earlier, it's
	 * possible for lockdep to have locked our mutexes first, and then
	 * we call a lower ->readdir which could call might_fault.  The
	 * different ordering of the locks is what lockdep complains about
	 * -- unnecessarily.  Therefore, we have no choice but to tell
	 * lockdep to temporarily turn itself off here.  Note: the comments
	 * inside might_sleep also suggest that it would have been nicer to
	 * only annotate the paths that need might_lock_read.
	 */
	lockdep_off();
	unionfs_read_lock(sb, UNIONFS_SMUTEX_PARENT);
	parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
	unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);

	/*
	 * We try to revalidate, but the VFS ignores return values from
	 * file->release, so we must always try to succeed here, including
	 * doing the kfree and dput below.  So if revalidation failed, all
	 * we can do is print some message and keep going.
	 */
	err = unionfs_file_revalidate(file, parent,
				      UNIONFS_F(file)->wrote_to_file);
	if (!err)
		unionfs_check_file(file);
	fileinfo = UNIONFS_F(file);
	BUG_ON(file->f_path.dentry->d_inode != inode);
	inodeinfo = UNIONFS_I(inode);

	/* fput all the lower files */
	fgen = atomic_read(&fileinfo->generation);
	bstart = fbstart(file);
	bend = fbend(file);

	for (bindex = bstart; bindex <= bend; bindex++) {
		lower_file = unionfs_lower_file_idx(file, bindex);

		if (lower_file) {
			unionfs_set_lower_file_idx(file, bindex, NULL);
			fput(lower_file);
			branchput(sb, bindex);
		}

		/* if there are no more refs to the dentry, dput it */
		if (d_deleted(dentry)) {
			dput(unionfs_lower_dentry_idx(dentry, bindex));
			unionfs_set_lower_dentry_idx(dentry, bindex, NULL);
		}
	}

	kfree(fileinfo->lower_files);
	kfree(fileinfo->saved_branch_ids);

	if (fileinfo->rdstate) {
		fileinfo->rdstate->access = jiffies;
		spin_lock(&inodeinfo->rdlock);
		inodeinfo->rdcount++;
		list_add_tail(&fileinfo->rdstate->cache,
			      &inodeinfo->readdircache);
		mark_inode_dirty(inode);
		spin_unlock(&inodeinfo->rdlock);
		fileinfo->rdstate = NULL;
	}
	kfree(fileinfo);

	unionfs_unlock_dentry(dentry);
	unionfs_unlock_parent(dentry, parent);
	unionfs_read_unlock(sb);
	lockdep_on();
	return err;
}
void au_fi_mmap_unlock(struct file *file)
{
	lockdep_off();
	mutex_unlock(&au_fi(file)->fi_mmap);
	lockdep_on();
}
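/*
 * For symmetry, a sketch of what the matching lock side presumably looks
 * like; the exact body in aufs may differ (an assumption, not copied from
 * the original source):
 */
void au_fi_mmap_lock(struct file *file)
{
	/* lockdep is suppressed for the same stacked-fs false positives */
	lockdep_off();
	mutex_lock(&au_fi(file)->fi_mmap);
	lockdep_on();
}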
static int __unionfs_rename(struct inode *old_dir, struct dentry *old_dentry,
			    struct dentry *old_parent,
			    struct inode *new_dir, struct dentry *new_dentry,
			    struct dentry *new_parent,
			    int bindex)
{
	int err = 0;
	struct dentry *lower_old_dentry;
	struct dentry *lower_new_dentry;
	struct dentry *lower_old_dir_dentry;
	struct dentry *lower_new_dir_dentry;
	struct dentry *trap;

	lower_new_dentry = unionfs_lower_dentry_idx(new_dentry, bindex);
	lower_old_dentry = unionfs_lower_dentry_idx(old_dentry, bindex);

	if (!lower_new_dentry) {
		lower_new_dentry =
			create_parents(new_parent->d_inode,
				       new_dentry, new_dentry->d_name.name,
				       bindex);
		if (IS_ERR(lower_new_dentry)) {
			err = PTR_ERR(lower_new_dentry);
			if (IS_COPYUP_ERR(err))
				goto out;
			printk(KERN_ERR "unionfs: error creating directory "
			       "tree for rename, bindex=%d err=%d\n",
			       bindex, err);
			goto out;
		}
	}

	/* check for and remove whiteout, if any */
	err = check_unlink_whiteout(new_dentry, lower_new_dentry, bindex);
	if (err > 0) /* ignore if whiteout found and successfully removed */
		err = 0;
	if (err)
		goto out;

	/* check if old_dentry branch is writable */
	err = is_robranch_super(old_dentry->d_sb, bindex);
	if (err)
		goto out;

	dget(lower_old_dentry);
	dget(lower_new_dentry);
	lower_old_dir_dentry = dget_parent(lower_old_dentry);
	lower_new_dir_dentry = dget_parent(lower_new_dentry);

	/* see Documentation/filesystems/unionfs/issues.txt */
	lockdep_off();
	trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
	/* source should not be an ancestor of target */
	if (trap == lower_old_dentry) {
		err = -EINVAL;
		goto out_err_unlock;
	}
	/* target should not be an ancestor of source */
	if (trap == lower_new_dentry) {
		err = -ENOTEMPTY;
		goto out_err_unlock;
	}
	err = vfs_rename(lower_old_dir_dentry->d_inode, lower_old_dentry,
			 lower_new_dir_dentry->d_inode, lower_new_dentry);
out_err_unlock:
	if (!err) {
		/* update parent dir times */
		fsstack_copy_attr_times(old_dir,
					lower_old_dir_dentry->d_inode);
		fsstack_copy_attr_times(new_dir,
					lower_new_dir_dentry->d_inode);
	}
	unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
	lockdep_on();

	dput(lower_old_dir_dentry);
	dput(lower_new_dir_dentry);
	dput(lower_old_dentry);
	dput(lower_new_dentry);

out:
	if (!err) {
		/* Fixup the new_dentry. */
		if (bindex < dbstart(new_dentry))
			dbstart(new_dentry) = bindex;
		else if (bindex > dbend(new_dentry))
			dbend(new_dentry) = bindex;
	}

	return err;
}
static int unionfs_link(struct dentry *old_dentry, struct inode *dir,
			struct dentry *new_dentry)
{
	int err = 0;
	struct dentry *lower_old_dentry = NULL;
	struct dentry *lower_new_dentry = NULL;
	struct dentry *lower_dir_dentry = NULL;
	struct dentry *old_parent, *new_parent;
	char *name = NULL;
	bool valid;

	unionfs_read_lock(old_dentry->d_sb, UNIONFS_SMUTEX_CHILD);
	old_parent = dget_parent(old_dentry);
	new_parent = dget_parent(new_dentry);
	unionfs_double_lock_parents(old_parent, new_parent);
	unionfs_double_lock_dentry(old_dentry, new_dentry);

	valid = __unionfs_d_revalidate(old_dentry, old_parent, false);
	if (unlikely(!valid)) {
		err = -ESTALE;
		goto out;
	}
	if (new_dentry->d_inode) {
		valid = __unionfs_d_revalidate(new_dentry, new_parent, false);
		if (unlikely(!valid)) {
			err = -ESTALE;
			goto out;
		}
	}

	lower_new_dentry = unionfs_lower_dentry(new_dentry);

	/* check for a whiteout in new dentry branch, and delete it */
	err = check_unlink_whiteout(new_dentry, lower_new_dentry,
				    dbstart(new_dentry));
	if (err > 0) {	/* whiteout found and removed successfully */
		lower_dir_dentry = dget_parent(lower_new_dentry);
		fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
		dput(lower_dir_dentry);
		dir->i_nlink = unionfs_get_nlinks(dir);
		err = 0;
	}
	if (err)
		goto out;

	/* check if parent hierarchy is needed, then link in same branch */
	if (dbstart(old_dentry) != dbstart(new_dentry)) {
		lower_new_dentry = create_parents(dir, new_dentry,
						  new_dentry->d_name.name,
						  dbstart(old_dentry));
		err = PTR_ERR(lower_new_dentry);
		if (IS_COPYUP_ERR(err))
			goto docopyup;
		if (!lower_new_dentry || IS_ERR(lower_new_dentry))
			goto out;
	}
	lower_new_dentry = unionfs_lower_dentry(new_dentry);
	lower_old_dentry = unionfs_lower_dentry(old_dentry);

	BUG_ON(dbstart(old_dentry) != dbstart(new_dentry));
	lower_dir_dentry = lock_parent(lower_new_dentry);
	err = is_robranch(old_dentry);
	if (!err) {
		/* see Documentation/filesystems/unionfs/issues.txt */
		lockdep_off();
		err = vfs_link(lower_old_dentry, lower_dir_dentry->d_inode,
			       lower_new_dentry);
		lockdep_on();
	}
	unlock_dir(lower_dir_dentry);

docopyup:
	if (IS_COPYUP_ERR(err)) {
		int old_bstart = dbstart(old_dentry);
		int bindex;

		for (bindex = old_bstart - 1; bindex >= 0; bindex--) {
			err = copyup_dentry(old_parent->d_inode,
					    old_dentry, old_bstart,
					    bindex, old_dentry->d_name.name,
					    old_dentry->d_name.len, NULL,
					    i_size_read(old_dentry->d_inode));
			if (err)
				continue;
			lower_new_dentry =
				create_parents(dir, new_dentry,
					       new_dentry->d_name.name,
					       bindex);
			lower_old_dentry = unionfs_lower_dentry(old_dentry);
			lower_dir_dentry = lock_parent(lower_new_dentry);
			/* see Documentation/filesystems/unionfs/issues.txt */
			lockdep_off();
			/* do vfs_link */
			err = vfs_link(lower_old_dentry,
				       lower_dir_dentry->d_inode,
				       lower_new_dentry);
			lockdep_on();
			unlock_dir(lower_dir_dentry);
			goto check_link;
		}
		goto out;
	}

check_link:
	if (err || !lower_new_dentry->d_inode)
		goto out;

	/* it's a hard link, so use the same inode */
	new_dentry->d_inode = igrab(old_dentry->d_inode);
	d_add(new_dentry, new_dentry->d_inode);
	unionfs_copy_attr_all(dir, lower_new_dentry->d_parent->d_inode);
	fsstack_copy_inode_size(dir, lower_new_dentry->d_parent->d_inode);

	/* propagate number of hard-links */
	old_dentry->d_inode->i_nlink =
		unionfs_get_nlinks(old_dentry->d_inode);
	/* the new dentry's ctime may have changed due to hard-link counts */
	unionfs_copy_attr_times(new_dentry->d_inode);

out:
	if (!new_dentry->d_inode)
		d_drop(new_dentry);

	kfree(name);
	if (!err)
		unionfs_postcopyup_setmnt(new_dentry);

	unionfs_check_inode(dir);
	unionfs_check_dentry(new_dentry);
	unionfs_check_dentry(old_dentry);
	unionfs_double_unlock_dentry(old_dentry, new_dentry);
	unionfs_double_unlock_parents(old_parent, new_parent);
	dput(new_parent);
	dput(old_parent);
	unionfs_read_unlock(old_dentry->d_sb);

	return err;
}
asmlinkage int vprintk(const char *fmt, va_list args)
{
	int printed_len = 0;
	int current_log_level = default_message_loglevel;
	unsigned long flags;
	int this_cpu;
	char *p;

	boot_delay_msec();
	printk_delay();

	preempt_disable();
	/* This stops the holder of console_sem just where we want him */
	raw_local_irq_save(flags);
	this_cpu = smp_processor_id();

	/*
	 * Ouch, printk recursed into itself!
	 */
	if (unlikely(printk_cpu == this_cpu)) {
		/*
		 * If a crash is occurring during printk() on this CPU,
		 * then try to get the crash message out but make sure
		 * we can't deadlock. Otherwise just return to avoid the
		 * recursion - but flag it so that it can be printed at
		 * the next appropriate moment:
		 */
		if (!oops_in_progress) {
			recursion_bug = 1;
			goto out_restore_irqs;
		}
		zap_locks();
	}

	lockdep_off();
	spin_lock(&logbuf_lock);
	printk_cpu = this_cpu;

	if (recursion_bug) {
		recursion_bug = 0;
		strcpy(printk_buf, recursion_bug_msg);
		printed_len = strlen(recursion_bug_msg);
	}
	/* Emit the output into the temporary buffer */
	printed_len += vscnprintf(printk_buf + printed_len,
				  sizeof(printk_buf) - printed_len, fmt, args);

	p = printk_buf;

	/* Do we have a loglevel in the string? */
	if (p[0] == '<') {
		unsigned char c = p[1];

		if (c && p[2] == '>') {
			switch (c) {
			case '0' ... '7': /* loglevel */
				current_log_level = c - '0';
			/* Fallthrough - make sure we're on a new line */
			case 'd': /* KERN_DEFAULT */
				if (!new_text_line) {
					emit_log_char('\n');
					new_text_line = 1;
				}
			/* Fallthrough - skip the loglevel */
			case 'c': /* KERN_CONT */
				p += 3;
				break;
			}
		}
	}
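/*
 * For illustration: the "<N>" tokens parsed above are what the KERN_*
 * prefixes of this era expand to, e.g. KERN_INFO is "<6>", so a call like
 *
 *	printk(KERN_INFO "hello\n");
 *
 * arrives here as the string "<6>hello\n". KERN_DEFAULT ("<d>") keeps the
 * default loglevel, and KERN_CONT ("<c>") marks a continuation of the
 * previous line.
 */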
static inline void action_lock_lock(void)
{
	lockdep_off();
	write_lock(&krg_action_lock);
}
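/*
 * The matching unlock presumably re-enables lockdep after dropping the
 * lock; a sketch of that counterpart (an assumption, not copied from the
 * original source):
 */
static inline void action_lock_unlock(void)
{
	write_unlock(&krg_action_lock);
	lockdep_on();
}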
asmlinkage int vprintk(const char *fmt, va_list args)
{
	unsigned long flags;
	int printed_len;
	char *p;
	static char printk_buf[1024];
	static int log_level_unknown = 1;

	preempt_disable();
	if (unlikely(oops_in_progress) && printk_cpu == smp_processor_id())
		/* If a crash is occurring during printk() on this CPU,
		 * make sure we can't deadlock */
		zap_locks();

	/* This stops the holder of console_sem just where we want him */
	raw_local_irq_save(flags);
	lockdep_off();
	spin_lock(&logbuf_lock);
	printk_cpu = smp_processor_id();

	/* Emit the output into the temporary buffer */
	printed_len = vscnprintf(printk_buf, sizeof(printk_buf), fmt, args);

#ifdef CONFIG_DEBUG_LL
	printascii(printk_buf);
#endif

	/*
	 * Copy the output into log_buf. If the caller didn't provide
	 * appropriate log level tags, we insert them here
	 */
	for (p = printk_buf; *p; p++) {
		if (log_level_unknown) {
			/* log_level_unknown signals the start of a new line */
			if (printk_time) {
				int loglev_char;
				char tbuf[50], *tp;
				unsigned tlen;
				unsigned long long t;
				unsigned long nanosec_rem;

				/*
				 * force the log level token to be
				 * before the time output.
				 */
				if (p[0] == '<' && p[1] >= '0' &&
				    p[1] <= '7' && p[2] == '>') {
					loglev_char = p[1];
					p += 3;
					printed_len -= 3;
				} else {
					loglev_char = default_message_loglevel
						+ '0';
				}
				t = printk_clock();
				nanosec_rem = do_div(t, 1000000000);
				tlen = sprintf(tbuf,
						"<%c>[%5lu.%06lu] ",
						loglev_char,
						(unsigned long)t,
						nanosec_rem/1000);

				for (tp = tbuf; tp < tbuf + tlen; tp++)
					emit_log_char(*tp);
				printed_len += tlen;
			} else {
				if (p[0] != '<' || p[1] < '0' ||
				    p[1] > '7' || p[2] != '>') {
					emit_log_char('<');
					emit_log_char(default_message_loglevel
						+ '0');
					emit_log_char('>');
					printed_len += 3;
				}
			}
			log_level_unknown = 0;
			if (!*p)
				break;
		}
		emit_log_char(*p);
		if (*p == '\n')
			log_level_unknown = 1;
	}

	if (!down_trylock(&console_sem)) {
		/*
		 * We own the drivers.  We can drop the spinlock and
		 * let release_console_sem() print the text, maybe ...
		 */
		console_locked = 1;
		printk_cpu = UINT_MAX;
		spin_unlock(&logbuf_lock);

		/*
		 * Console drivers may assume that per-cpu resources have
		 * been allocated. So unless they're explicitly marked as
		 * being able to cope (CON_ANYTIME) don't call them until
		 * this CPU is officially up.
		 */
		if (cpu_online(smp_processor_id()) || have_callable_console()) {
			console_may_schedule = 0;
			release_console_sem();
		} else {
			/* Release by hand to avoid flushing the buffer. */
			console_locked = 0;
			up(&console_sem);
		}
		lockdep_on();
		raw_local_irq_restore(flags);
	} else {
		/*
		 * Someone else owns the drivers.  We drop the spinlock, which
		 * allows the semaphore holder to proceed and to call the
		 * console drivers with the output which we just produced.
		 */
		printk_cpu = UINT_MAX;
		spin_unlock(&logbuf_lock);
		lockdep_on();
		raw_local_irq_restore(flags);
	}

	preempt_enable();
	return printed_len;
}
static int au_cmoo(struct dentry *dentry)
{
	int err, cmoo;
	unsigned int udba;
	struct path h_path;
	struct au_pin pin;
	struct au_cp_generic cpg = {
		.dentry	= dentry,
		.bdst	= -1,
		.bsrc	= -1,
		.len	= -1,
		.pin	= &pin,
		.flags	= AuCpup_DTIME | AuCpup_HOPEN
	};
	struct inode *inode, *delegated;
	struct super_block *sb;
	struct au_sbinfo *sbinfo;
	struct au_fhsm *fhsm;
	pid_t pid;
	struct au_branch *br;
	struct dentry *parent;
	struct au_hinode *hdir;

	DiMustWriteLock(dentry);
	inode = dentry->d_inode;
	IiMustWriteLock(inode);

	err = 0;
	if (IS_ROOT(dentry))
		goto out;
	cpg.bsrc = au_dbstart(dentry);
	if (!cpg.bsrc)
		goto out;

	sb = dentry->d_sb;
	sbinfo = au_sbi(sb);
	fhsm = &sbinfo->si_fhsm;
	pid = au_fhsm_pid(fhsm);
	if (pid
	    && (current->pid == pid
		|| current->real_parent->pid == pid))
		goto out;

	br = au_sbr(sb, cpg.bsrc);
	cmoo = au_br_cmoo(br->br_perm);
	if (!cmoo)
		goto out;
	if (!S_ISREG(inode->i_mode))
		cmoo &= AuBrAttr_COO_ALL;
	if (!cmoo)
		goto out;

	parent = dget_parent(dentry);
	di_write_lock_parent(parent);
	err = au_wbr_do_copyup_bu(dentry, cpg.bsrc - 1);
	cpg.bdst = err;
	if (unlikely(err < 0)) {
		err = 0;	/* there is no upper writable branch */
		goto out_dgrade;
	}
	AuDbg("bsrc %d, bdst %d\n", cpg.bsrc, cpg.bdst);

	/* do not respect the coo attrib for the target branch */
	err = au_cpup_dirs(dentry, cpg.bdst);
	if (unlikely(err))
		goto out_dgrade;

	di_downgrade_lock(parent, AuLock_IR);
	udba = au_opt_udba(sb);
	err = au_pin(&pin, dentry, cpg.bdst, udba,
		     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
	if (unlikely(err))
		goto out_parent;

	err = au_sio_cpup_simple(&cpg);
	au_unpin(&pin);
	if (unlikely(err))
		goto out_parent;
	if (!(cmoo & AuBrWAttr_MOO))
		goto out_parent; /* success */

	err = au_pin(&pin, dentry, cpg.bsrc, udba,
		     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
	if (unlikely(err))
		goto out_parent;

	h_path.mnt = au_br_mnt(br);
	h_path.dentry = au_h_dptr(dentry, cpg.bsrc);
	hdir = au_hi(parent->d_inode, cpg.bsrc);
	delegated = NULL;
	err = vfsub_unlink(hdir->hi_inode, &h_path, &delegated, /*force*/1);
	au_unpin(&pin);
	/* todo: keep h_dentry or not? */
	if (unlikely(err == -EWOULDBLOCK)) {
		pr_warn("cannot retry for NFSv4 delegation"
			" for an internal unlink\n");
		iput(delegated);
	}
	if (unlikely(err)) {
		pr_err("unlink %pd after coo failed (%d), ignored\n",
		       dentry, err);
		err = 0;
	}
	goto out_parent; /* success */

out_dgrade:
	di_downgrade_lock(parent, AuLock_IR);
out_parent:
	di_read_unlock(parent, AuLock_IR);
	dput(parent);
out:
	AuTraceErr(err);
	return err;
}

int au_do_open(struct file *file, struct au_do_open_args *args)
{
	int err, no_lock = args->no_lock;
	struct dentry *dentry;
	struct au_finfo *finfo;

	if (!no_lock)
		err = au_finfo_init(file, args->fidir);
	else {
		lockdep_off();
		err = au_finfo_init(file, args->fidir);
		lockdep_on();
	}
	if (unlikely(err))
		goto out;

	dentry = file->f_dentry;
	AuDebugOn(IS_ERR_OR_NULL(dentry));
	if (!no_lock) {
		di_write_lock_child(dentry);
		err = au_cmoo(dentry);
		di_downgrade_lock(dentry, AuLock_IR);
		if (!err)
			err = args->open(file, vfsub_file_flags(file), NULL);
		di_read_unlock(dentry, AuLock_IR);
	} else {
		err = au_cmoo(dentry);
		if (!err)
			err = args->open(file, vfsub_file_flags(file),
					 args->h_file);
		if (!err && au_fbstart(file) != au_dbstart(dentry))
			/*
			 * cmoo happens after h_file was opened.
			 * need to refresh the file later.
			 */
			atomic_dec(&au_fi(file)->fi_generation);
	}

	finfo = au_fi(file);
	if (!err) {
		finfo->fi_file = file;
		au_sphl_add(&finfo->fi_hlist,
			    &au_sbi(file->f_dentry->d_sb)->si_files);
	}
	if (!no_lock)
		fi_write_unlock(file);
	else {
		lockdep_off();
		fi_write_unlock(file);
		lockdep_on();
	}
	if (unlikely(err)) {
		finfo->fi_hdir = NULL;
		au_finfo_fin(file);
	}

out:
	return err;
}