/*
 * Atomically update the sched_clock epoch.
 */
static void notrace update_sched_clock(void)
{
	unsigned long flags;
	u32 cyc;
	u64 ns;

	cyc = read_sched_clock();
	ns = cd.epoch_ns +
		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
			  cd.mult, cd.shift);
	/*
	 * Write epoch_cyc and epoch_ns in a way that the update is
	 * detectable in cyc_to_fixed_sched_clock().
	 */
	raw_local_irq_save(flags);
	cd.epoch_cyc_copy = cyc;
	smp_wmb();
	cd.epoch_ns = ns;
	smp_wmb();
	cd.epoch_cyc = cyc;
	raw_local_irq_restore(flags);
}
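/*
 * The write order above (epoch_cyc_copy, then epoch_ns, then epoch_cyc,
 * separated by write barriers) only helps if the reader checks the copy.
 * Below is a minimal sketch of such a reader, assuming the same clock_data
 * layout and cyc_to_ns() helper; the actual reader named in the comment,
 * cyc_to_fixed_sched_clock(), is not part of this excerpt.
 */
static unsigned long long notrace cyc_to_sched_clock_sketch(u32 cyc, u32 mask)
{
	u64 epoch_ns;
	u32 epoch_cyc;

	do {
		/*
		 * Read in the reverse order of the update and retry if an
		 * update raced with us (epoch_cyc and its copy differ).
		 */
		epoch_cyc = cd.epoch_cyc;
		smp_rmb();
		epoch_ns = cd.epoch_ns;
		smp_rmb();
	} while (epoch_cyc != cd.epoch_cyc_copy);

	return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift);
}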
/*
 * Insert an RCU callback onto the calling CPU's list of 'current batch'
 * callbacks. Lockless version, can be invoked anywhere except under NMI.
 */
void call_rcu_sched(struct rcu_head *cb, void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;
	struct rcu_data *rd;
	struct rcu_list *cblist;
	int which;

	cb->func = func;
	cb->next = NULL;

	raw_local_irq_save(flags);
	smp_mb();

	rd = &rcu_data[rcu_cpu()];
	which = ACCESS_ONCE(rcu_which);
	cblist = &rd->cblist[which];

	/*
	 * The following is not NMI-safe, therefore call_rcu()
	 * cannot be invoked under NMI.
	 */
	rcu_list_add(cblist, cb);
	rd->nqueued++;
	smp_mb();
	raw_local_irq_restore(flags);
}
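/*
 * Typical caller-side usage of call_rcu_sched(): the rcu_head is embedded
 * in the object being freed and the callback recovers the object with
 * container_of(). The names my_obj, my_obj_reclaim and my_obj_release are
 * hypothetical, shown only to illustrate the calling convention.
 */
struct my_obj {
	int value;
	struct rcu_head rcu;
};

static void my_obj_reclaim(struct rcu_head *rcu)
{
	struct my_obj *obj = container_of(rcu, struct my_obj, rcu);

	kfree(obj);
}

static void my_obj_release(struct my_obj *obj)
{
	/* Defer the free until a grace period has elapsed. */
	call_rcu_sched(&obj->rcu, my_obj_reclaim);
}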
/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	raw_local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	__raw_spin_unlock(&ftrace_max_lock);
	raw_local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}
u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now;

	raw_local_irq_save(flags);

	this_cpu = raw_smp_processor_id();
	now = cpu_clock(this_cpu);
	/*
	 * If in an NMI context then don't risk lockups and return the
	 * cpu_clock() time:
	 */
	if (unlikely(in_nmi()))
		goto out;

	__raw_spin_lock(&trace_clock_struct.lock);

	/*
	 * TODO: if this happens often then maybe we should reset
	 * my_scd->clock to prev_time+1, to make sure
	 * we start ticking with the local clock from now on?
	 */
	if ((s64)(now - trace_clock_struct.prev_time) < 0)
		now = trace_clock_struct.prev_time + 1;

	trace_clock_struct.prev_time = now;

	__raw_spin_unlock(&trace_clock_struct.lock);

 out:
	raw_local_irq_restore(flags);

	return now;
}
/**
 * Program the FPGA (parallel mode).
 * Return 0 if success, >0 while programming, <0 if error detected.
 */
static size_t spartan_parallel_load(struct fpga_desc *desc, const char *buf,
				    size_t bsize)
{
	unsigned long flags;
	Xilinx_Spartan3_Slave_Parallel_fns *fn = desc->iface_fns;

	if (fn) {
		/* Load the data */
		size_t nbbyte = 0;

		raw_local_irq_save(flags);
		while (nbbyte < bsize) {
			(*fn->clk) (1);			/* Assert the clock pin */
			CONFIG_FPGA_DELAY ();
			(*fn->wdata) (buf[nbbyte++]);	/* Write the data byte */
			CONFIG_FPGA_DELAY ();
			(*fn->clk) (0);			/* Deassert the clock pin */
		}
		raw_local_irq_restore(flags);

		return bsize;
	}

	return -EINVAL;
}
asmlinkage int vprintk(const char *fmt, va_list args)
{
	int printed_len = 0;
	int current_log_level = default_message_loglevel;
	unsigned long flags;
	int this_cpu;
	char *p;

	boot_delay_msec();

	preempt_disable();
	/* This stops the holder of console_sem just where we want him */
	raw_local_irq_save(flags);
	this_cpu = smp_processor_id();

	/*
	 * Ouch, printk recursed into itself!
	 */
	if (unlikely(printk_cpu == this_cpu)) {
		/*
		 * If a crash is occurring during printk() on this CPU,
		 * then try to get the crash message out but make sure
		 * we can't deadlock. Otherwise just return to avoid the
		 * recursion and return - but flag the recursion so that
		 * it can be printed at the next appropriate moment:
		 */
		if (!oops_in_progress) {
			recursion_bug = 1;
			goto out_restore_irqs;
		}
		zap_locks();
	}

	lockdep_off();
	spin_lock(&logbuf_lock);
	printk_cpu = this_cpu;

	if (recursion_bug) {
		recursion_bug = 0;
		strcpy(printk_buf, recursion_bug_msg);
		printed_len = strlen(recursion_bug_msg);
	}
	/* Emit the output into the temporary buffer */
	printed_len += vscnprintf(printk_buf + printed_len,
				  sizeof(printk_buf) - printed_len, fmt, args);

#ifdef CONFIG_DEBUG_LL
	printascii(printk_buf);
#endif

	/*
	 * Copy the output into log_buf. If the caller didn't provide
	 * appropriate log level tags, we insert them here
	 */
	for (p = printk_buf; *p; p++) {
		if (new_text_line) {
			/* If a token, set current_log_level and skip over */
			if (p[0] == '<' && p[1] >= '0' && p[1] <= '7' &&
			    p[2] == '>') {
				current_log_level = p[1] - '0';
				p += 3;
				printed_len -= 3;
			}

			/* Always output the token */
			emit_log_char('<');
			emit_log_char(current_log_level + '0');
			emit_log_char('>');
			printed_len += 3;
			new_text_line = 0;

			if (printk_time) {
				/* Follow the token with the time */
				char tbuf[50], *tp;
				unsigned tlen;
				unsigned long long t;
				unsigned long nanosec_rem;

				t = cpu_clock(printk_cpu);
				nanosec_rem = do_div(t, 1000000000);
				tlen = sprintf(tbuf, "[%5lu.%06lu] ",
						(unsigned long) t,
						nanosec_rem / 1000);

				for (tp = tbuf; tp < tbuf + tlen; tp++)
					emit_log_char(*tp);
				printed_len += tlen;
			}

			if (!*p)
				break;
		}

		emit_log_char(*p);
		if (*p == '\n')
			new_text_line = 1;
	}

	/*
	 * Try to acquire and then immediately release the
	 * console semaphore. The release will do all the
	 * actual magic (print out buffers, wake up klogd,
	 * etc).
	 *
	 * The acquire_console_semaphore_for_printk() function
	 * will release 'logbuf_lock' regardless of whether it
	 * actually gets the semaphore or not.
	 */
	if (acquire_console_semaphore_for_printk(this_cpu))
		release_console_sem();

	lockdep_on();
out_restore_irqs:
	raw_local_irq_restore(flags);

	preempt_enable();

	return printed_len;
}
asmlinkage int vprintk(const char *fmt, va_list args)
{
	int printed_len = 0;
	int current_log_level = default_message_loglevel;
	unsigned long flags;
	int this_cpu;
	char *p;

	boot_delay_msec();
	printk_delay();

	preempt_disable();
	/* This stops the holder of console_sem just where we want him */
	raw_local_irq_save(flags);
	this_cpu = smp_processor_id();

	/*
	 * Ouch, printk recursed into itself!
	 */
	if (unlikely(printk_cpu == this_cpu)) {
		/*
		 * If a crash is occurring during printk() on this CPU,
		 * then try to get the crash message out but make sure
		 * we can't deadlock. Otherwise just return to avoid the
		 * recursion and return - but flag the recursion so that
		 * it can be printed at the next appropriate moment:
		 */
		if (!oops_in_progress) {
			recursion_bug = 1;
			goto out_restore_irqs;
		}
		zap_locks();
	}

	lockdep_off();
	spin_lock(&logbuf_lock);
	printk_cpu = this_cpu;

	if (recursion_bug) {
		recursion_bug = 0;
		strcpy(printk_buf, recursion_bug_msg);
		printed_len = strlen(recursion_bug_msg);
	}
	/* Emit the output into the temporary buffer */
	printed_len += vscnprintf(printk_buf + printed_len,
				  sizeof(printk_buf) - printed_len, fmt, args);

	trace_kernel_vprintk(_RET_IP_, printk_buf, printed_len);

	p = printk_buf;

	/* Do we have a loglevel in the string? */
	if (p[0] == '<') {
		unsigned char c = p[1];

		if (c && p[2] == '>') {
			switch (c) {
			case '0' ... '7': /* loglevel */
				current_log_level = c - '0';
			/* Fallthrough - make sure we're on a new line */
			case 'd': /* KERN_DEFAULT */
				if (!new_text_line) {
					emit_log_char('\n');
					new_text_line = 1;
				}
			/* Fallthrough - skip the loglevel */
			case 'c': /* KERN_CONT */
				p += 3;
				break;
			}
		}
	}
static ssize_t mv_cust_spec_proc_1_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t len)
{
	const char *name = attr->attr.name;
	unsigned int v;
	unsigned long flags;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	/* Read input */
	v = 0;
	sscanf(buf, "%x", &v);

	raw_local_irq_save(flags);

	if (!strcmp(name, "debug")) {
		mv_cust_debug_info_set(v);
	} else if (!strcmp(name, "omci_type")) {
		mv_cust_omci_type_set(v);
	} else if (!strcmp(name, "eoam_type")) {
		mv_cust_epon_oam_type_set(v);
	} else if (!strcmp(name, "eoam_txq")) {
		eoam_txq = v;
	} else if (!strcmp(name, "eoam_write")) {
		mv_cust_eoam_llid_set((int)v, &eoam_mac[0], eoam_txq);
	}
	/* else if (!strcmp(name, "omci_cmd")) { mv_cust_omci_hw_cmd_set(v); } */
	/* else if (!strcmp(name, "omci_gemp")) { mv_cust_omci_gemport_set(v); } */
	/* else if (!strcmp(name, "omci_gh_keep")) { mv_cust_xpon_oam_rx_gh_set(v); } */
	else if (!strcmp(name, "eoam_gh_keep")) {
		mv_cust_xpon_oam_rx_gh_set(v);
	} else if (!strcmp(name, "loopdet_type")) {
		mv_cust_loopdet_type_set(v);
	} else if (!strcmp(name, "omci_enable")) {
		mv_cust_omci_enable(v);
	} else if (!strcmp(name, "eoam_enable")) {
		mv_cust_eoam_enable(v);
	} else
		printk("%s: illegal operation <%s>\n", __FUNCTION__, attr->attr.name);

	raw_local_irq_restore(flags);

	return len;
}
asmlinkage int vprintk(const char *fmt, va_list args)
{
	unsigned long flags;
	int printed_len;
	char *p;
	static char printk_buf[1024];
	static int log_level_unknown = 1;

	boot_delay_msec();

	preempt_disable();
	if (unlikely(oops_in_progress) && printk_cpu == smp_processor_id())
		/* If a crash is occurring during printk() on this CPU,
		 * make sure we can't deadlock */
		zap_locks();

	/* This stops the holder of console_sem just where we want him */
	raw_local_irq_save(flags);
	lockdep_off();
	spin_lock(&logbuf_lock);
	printk_cpu = smp_processor_id();

	/* Emit the output into the temporary buffer */
	printed_len = vscnprintf(printk_buf, sizeof(printk_buf), fmt, args);

	/*
	 * Copy the output into log_buf. If the caller didn't provide
	 * appropriate log level tags, we insert them here
	 */
	for (p = printk_buf; *p; p++) {
		if (log_level_unknown) {
			/* log_level_unknown signals the start of a new line */
			if (printk_time) {
				int loglev_char;
				char tbuf[50], *tp;
				unsigned tlen;
				unsigned long long t;
				unsigned long nanosec_rem;

				/*
				 * force the log level token to be
				 * before the time output.
				 */
				if (p[0] == '<' && p[1] >= '0' &&
				    p[1] <= '7' && p[2] == '>') {
					loglev_char = p[1];
					p += 3;
					printed_len -= 3;
				} else {
					loglev_char = default_message_loglevel
						+ '0';
				}
				t = printk_clock();
				nanosec_rem = do_div(t, 1000000000);
				tlen = sprintf(tbuf, "<%c>[%5lu.%06lu] ",
						loglev_char,
						(unsigned long)t,
						nanosec_rem / 1000);

				for (tp = tbuf; tp < tbuf + tlen; tp++)
					emit_log_char(*tp);
				printed_len += tlen;
			} else {
				if (p[0] != '<' || p[1] < '0' ||
				    p[1] > '7' || p[2] != '>') {
					emit_log_char('<');
					emit_log_char(default_message_loglevel
						+ '0');
					emit_log_char('>');
					printed_len += 3;
				}
			}
			log_level_unknown = 0;
			if (!*p)
				break;
		}
		emit_log_char(*p);
		if (*p == '\n')
			log_level_unknown = 1;
	}

	if (!down_trylock(&console_sem)) {
		/*
		 * We own the drivers. We can drop the spinlock and
		 * let release_console_sem() print the text, maybe ...
		 */
		console_locked = 1;
		printk_cpu = UINT_MAX;
		spin_unlock(&logbuf_lock);

		/*
		 * Console drivers may assume that per-cpu resources have
		 * been allocated. So unless they're explicitly marked as
		 * being able to cope (CON_ANYTIME) don't call them until
		 * this CPU is officially up.
		 */
		if (cpu_online(smp_processor_id()) || have_callable_console()) {
			console_may_schedule = 0;
			release_console_sem();
		} else {
			/* Release by hand to avoid flushing the buffer. */
			console_locked = 0;
			up(&console_sem);
		}
		lockdep_on();
		raw_local_irq_restore(flags);
	} else {
		/*
		 * Someone else owns the drivers. We drop the spinlock, which
		 * allows the semaphore holder to proceed and to call the
		 * console drivers with the output which we just produced.
		 */
		printk_cpu = UINT_MAX;
		spin_unlock(&logbuf_lock);
		lockdep_on();
		raw_local_irq_restore(flags);
	}

	preempt_enable();
	return printed_len;
}
asmlinkage int vprintk(const char *fmt, va_list args)
{
	static int log_level_unknown = 1;
	static char printk_buf[1024];
	unsigned long flags;
	int printed_len = 0;
	int this_cpu;
	char *p;

	boot_delay_msec();

	preempt_disable();
	/* This stops the holder of console_sem just where we want him */
	/* 20100623,HSL@RK,audio broken,but system crash if comment */
	raw_local_irq_save(flags);
	this_cpu = smp_processor_id();

	/*
	 * Ouch, printk recursed into itself!
	 */
	if (unlikely(printk_cpu == this_cpu)) {
		/*
		 * If a crash is occurring during printk() on this CPU,
		 * then try to get the crash message out but make sure
		 * we can't deadlock. Otherwise just return to avoid the
		 * recursion and return - but flag the recursion so that
		 * it can be printed at the next appropriate moment:
		 */
		if (!oops_in_progress) {
			printk_recursion_bug = 1;	/* printk() called from within printk() */
			goto out_restore_irqs;
		}
		zap_locks();
	}

	lockdep_off();
	spin_lock(&logbuf_lock);
	printk_cpu = this_cpu;

	if (printk_recursion_bug) {
		printk_recursion_bug = 0;
		strcpy(printk_buf, printk_recursion_bug_msg);
		printed_len = sizeof(printk_recursion_bug_msg);
	}
	/* Emit the output into the temporary buffer */
	printed_len += vscnprintf(printk_buf + printed_len,
				  sizeof(printk_buf) - printed_len, fmt, args);

//#ifdef CONFIG_DEBUG_LL
//	printascii(printk_buf);
//#endif

	/*
	 * Copy the output into log_buf. If the caller didn't provide
	 * appropriate log level tags, we insert them here
	 */
	for (p = printk_buf; *p; p++) {
		if (log_level_unknown) {
			/* log_level_unknown signals the start of a new line */
			if (printk_time) {
				int loglev_char;
				char tbuf[50], *tp;
				unsigned tlen;
				unsigned long long t;
				unsigned long nanosec_rem;

				/*
				 * force the log level token to be
				 * before the time output.
				 */
				if (p[0] == '<' && p[1] >= '0' &&
				    p[1] <= '7' && p[2] == '>') {
					loglev_char = p[1];
					p += 3;
					printed_len -= 3;
				} else {
					loglev_char = default_message_loglevel
						+ '0';
				}
#if 0
				t = cpu_clock(printk_cpu);
				nanosec_rem = do_div(t, 1000000000);
				tlen = sprintf(tbuf, "<%c>[%5lu.%06lu] ",
						loglev_char,
						(unsigned long)t,
						nanosec_rem / 1000);
#else
				nanosec_rem = nanosec_rem;
				t = t;
				tlen = sprintf(tbuf, "[%lu] ", printk_clock());
#endif
				for (tp = tbuf; tp < tbuf + tlen; tp++)
					emit_log_char(*tp);
				printed_len += tlen;
			} else {
				if (p[0] != '<' || p[1] < '0' ||
				    p[1] > '7' || p[2] != '>') {
					emit_log_char('<');
					emit_log_char(default_message_loglevel
						+ '0');
					emit_log_char('>');
					printed_len += 3;
				}
			}
			log_level_unknown = 0;
			if (!*p)
				break;
		}
		emit_log_char(*p);
		if (*p == '\n')
			log_level_unknown = 1;
	}

	/*
	 * Try to acquire and then immediately release the
	 * console semaphore. The release will do all the
	 * actual magic (print out buffers, wake up klogd,
	 * etc).
	 *
	 * The acquire_console_semaphore_for_printk() function
	 * will release 'logbuf_lock' regardless of whether it
	 * actually gets the semaphore or not.
	 */
	if (acquire_console_semaphore_for_printk(this_cpu))
		release_console_sem();

	lockdep_on();
out_restore_irqs:
	raw_local_irq_restore(flags);

	preempt_enable();

	return printed_len;
}
static ssize_t tcam_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	const char *name = attr->attr.name;
	unsigned int err = 0, a = 0, b = 0;
	unsigned long flags;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	sscanf(buf, "%x %x", &a, &b);

	raw_local_irq_save(flags);

	if (!strcmp(name, "hw_write"))
		tcam_hw_write(&te, a);
	else if (!strcmp(name, "hw_read"))
		tcam_hw_read(&te, a);
	else if (!strcmp(name, "hw_debug"))
		tcam_hw_debug(a);
	else if (!strcmp(name, "hw_inv"))
		tcam_hw_inv(a);
	else if (!strcmp(name, "hw_inv_all"))
		tcam_hw_inv_all();
	else if (!strcmp(name, "hw_hits"))
		tcam_hw_record(a);
#ifdef CONFIG_MV_ETH_PNC_AGING
	else if (!strcmp(name, "age_clear"))
		mvPncAgingCntrClear(a);
	else if (!strcmp(name, "age_cntr")) {
		b = mvPncAgingCntrRead(a);
		printk("tid=%d: age_cntr = 0x%08x\n", a, b);
	}
#endif /* CONFIG_MV_ETH_PNC_AGING */
	else if (!strcmp(name, "sw_clear"))
		tcam_sw_clear(&te);
	else if (!strcmp(name, "sw_text")) {
		/* Remove last byte (new line) from the buffer */
		int len = strlen(buf);
		char *temp = mvOsMalloc(len + 1);

		strncpy(temp, buf, len - 1);
		temp[len - 1] = 0;
		tcam_sw_text(&te, temp);
		mvOsFree(temp);
	} else if (!strcmp(name, "t_port"))
		tcam_sw_set_port(&te, a, b);
	else if (!strcmp(name, "t_lookup"))
		tcam_sw_set_lookup(&te, a);
	else if (!strcmp(name, "t_ainfo_0"))
		tcam_sw_set_ainfo(&te, 0 << a, 1 << a);
	else if (!strcmp(name, "t_ainfo_1"))
		tcam_sw_set_ainfo(&te, 1 << a, 1 << a);
	else if (!strcmp(name, "t_ainfo"))
		tcam_sw_set_ainfo(&te, a, b);
	else if (!strcmp(name, "t_offset_byte"))
		tcam_sw_set_byte(&te, a, b);
	else if (!strcmp(name, "t_offset_mask"))
		tcam_sw_set_mask(&te, a, b);
	else if (!strcmp(name, "s_lookup"))
		sram_sw_set_next_lookup(&te, a);
	else if (!strcmp(name, "s_ainfo"))
		sram_sw_set_ainfo(&te, a, b);
	else if (!strcmp(name, "s_lookup_done"))
		sram_sw_set_lookup_done(&te, a);
	else if (!strcmp(name, "s_next_lookup_shift"))
		sram_sw_set_next_lookup_shift(&te, a);
	else if (!strcmp(name, "s_rxq"))
		sram_sw_set_rxq(&te, a, b);
	else if (!strcmp(name, "s_shift_update"))
		sram_sw_set_shift_update(&te, a, b);
	else if (!strcmp(name, "s_rinfo"))
		sram_sw_set_rinfo(&te, 1 << a);
	else if (!strcmp(name, "s_rinfo_extra"))
		sram_sw_set_rinfo_extra(&te, a << (b & ~1));
	else if (!strcmp(name, "s_flowid"))
		sram_sw_set_flowid(&te, a, b);
	else if (!strcmp(name, "s_flowid_nibble"))
		sram_sw_set_flowid_nibble(&te, a, b);
#ifdef CONFIG_MV_ETH_PNC_AGING
	else if (!strcmp(name, "age_gr_set"))
		mvPncAgingCntrGroupSet(a, b);
#endif /* CONFIG_MV_ETH_PNC_AGING */
	else {
		err = 1;
		printk("%s: illegal operation <%s>\n", __FUNCTION__, attr->attr.name);
	}

	raw_local_irq_restore(flags);

	if (err)
		printk("%s: <%s>, error %d\n", __FUNCTION__, attr->attr.name, err);

	return err ? -EINVAL : len;
}
static void probe_likely_condition(struct ftrace_likely_data *f,
				   int val, int expect)
{
	struct trace_event_call *call = &event_branch;
	struct trace_array *tr = branch_tracer;
	struct trace_array_cpu *data;
	struct ring_buffer_event *event;
	struct trace_branch *entry;
	struct ring_buffer *buffer;
	unsigned long flags;
	int pc;
	const char *p;

	if (current->trace_recursion & TRACE_BRANCH_BIT)
		return;

	/*
	 * I would love to save just the ftrace_likely_data pointer, but
	 * this code can also be used by modules. Ugly things can happen
	 * if the module is unloaded, and then we go and read the
	 * pointer. This is slower, but much safer.
	 */
	if (unlikely(!tr))
		return;

	raw_local_irq_save(flags);
	current->trace_recursion |= TRACE_BRANCH_BIT;
	data = this_cpu_ptr(tr->trace_buffer.data);
	if (atomic_read(&data->disabled))
		goto out;

	pc = preempt_count();
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out;

	entry = ring_buffer_event_data(event);

	/* Strip off the path, only save the file */
	p = f->data.file + strlen(f->data.file);
	while (p >= f->data.file && *p != '/')
		p--;
	p++;

	strncpy(entry->func, f->data.func, TRACE_FUNC_SIZE);
	strncpy(entry->file, p, TRACE_FILE_SIZE);
	entry->func[TRACE_FUNC_SIZE] = 0;
	entry->file[TRACE_FILE_SIZE] = 0;
	entry->constant = f->constant;
	entry->line = f->data.line;
	entry->correct = val == expect;

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);

 out:
	current->trace_recursion &= ~TRACE_BRANCH_BIT;
	raw_local_irq_restore(flags);
}
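/*
 * The record filled in above implies roughly the following layout for
 * struct trace_branch. This is only a sketch, assuming the usual ftrace
 * convention of a struct trace_entry header; the authoritative definition
 * is generated from the tracer's FTRACE_ENTRY() description, which is not
 * part of this excerpt.
 */
struct trace_branch_sketch {
	struct trace_entry ent;			/* common ftrace record header */
	unsigned line;				/* source line of the branch */
	char func[TRACE_FUNC_SIZE + 1];		/* function name, NUL-terminated */
	char file[TRACE_FILE_SIZE + 1];		/* basename of the source file */
	char correct;				/* did the prediction match the outcome? */
	char constant;				/* condition was a compile-time constant */
};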