/*
 * Garbage collector for unused keys.
 *
 * This is done in process context so that we don't have to disable interrupts
 * all over the place.  key_put() schedules this rather than trying to do the
 * cleanup itself, which means key_put() doesn't have to sleep.
 */
static void key_garbage_collector(struct work_struct *work)
{
	static LIST_HEAD(graveyard);
	static u8 gc_state;		/* Internal persistent state */
#define KEY_GC_REAP_AGAIN	0x01	/* - Need another cycle */
#define KEY_GC_REAPING_LINKS	0x02	/* - We need to reap links */
#define KEY_GC_SET_TIMER	0x04	/* - We need to restart the timer */
#define KEY_GC_REAPING_DEAD_1	0x10	/* - We need to mark dead keys */
#define KEY_GC_REAPING_DEAD_2	0x20	/* - We need to reap dead key links */
#define KEY_GC_REAPING_DEAD_3	0x40	/* - We need to reap dead keys */
#define KEY_GC_FOUND_DEAD_KEY	0x80	/* - We found at least one dead key */

	struct rb_node *cursor;
	struct key *key;
	time_t new_timer, limit;

	kenter("[%lx,%x]", key_gc_flags, gc_state);

	/* Only keys that expired more than key_gc_delay seconds ago are
	 * eligible; clamp so limit never goes below key_gc_delay. */
	limit = current_kernel_time().tv_sec;
	if (limit > key_gc_delay)
		limit -= key_gc_delay;
	else
		limit = key_gc_delay;

	/* Work out what we're going to be doing in this pass.  The two
	 * REAPING_DEAD bits advance one stage per pass (mark -> reap links ->
	 * reap keys). */
	gc_state &= KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2;
	gc_state <<= 1;
	if (test_and_clear_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags))
		gc_state |= KEY_GC_REAPING_LINKS | KEY_GC_SET_TIMER;

	if (test_and_clear_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags))
		gc_state |= KEY_GC_REAPING_DEAD_1;
	kdebug("new pass %x", gc_state);

	new_timer = LONG_MAX;

	/* As only this function is permitted to remove things from the key
	 * serial tree, if cursor is non-NULL then it will always point to a
	 * valid node in the tree - even if lock got dropped.
	 */
	spin_lock(&key_serial_lock);
	cursor = rb_first(&key_serial_tree);

continue_scanning:
	while (cursor) {
		key = rb_entry(cursor, struct key, serial_node);
		cursor = rb_next(cursor);

		if (atomic_read(&key->usage) == 0)
			goto found_unreferenced_key;

		if (unlikely(gc_state & KEY_GC_REAPING_DEAD_1)) {
			/* Stage 1: mark keys of the dying type dead and strip
			 * their permissions so no one can use them. */
			if (key->type == key_gc_dead_keytype) {
				gc_state |= KEY_GC_FOUND_DEAD_KEY;
				set_bit(KEY_FLAG_DEAD, &key->flags);
				key->perm = 0;
				goto skip_dead_key;
			}
		}

		if (gc_state & KEY_GC_SET_TIMER) {
			/* track the earliest not-yet-due expiry */
			if (key->expiry > limit && key->expiry < new_timer) {
				kdebug("will expire %x in %ld",
				       key_serial(key), key->expiry - limit);
				new_timer = key->expiry;
			}
		}

		if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2))
			if (key->type == key_gc_dead_keytype)
				gc_state |= KEY_GC_FOUND_DEAD_KEY;

		if ((gc_state & KEY_GC_REAPING_LINKS) ||
		    unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) {
			if (key->type == &key_type_keyring)
				goto found_keyring;
		}

		if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3))
			if (key->type == key_gc_dead_keytype)
				goto destroy_dead_key;

	skip_dead_key:
		if (spin_is_contended(&key_serial_lock) || need_resched())
			goto contended;
	}

contended:
	spin_unlock(&key_serial_lock);

maybe_resched:
	if (cursor) {
		cond_resched();
		spin_lock(&key_serial_lock);
		goto continue_scanning;
	}

	/* We've completed the pass.  Set the timer if we need to and queue a
	 * new cycle if necessary.  We keep executing cycles until we find one
	 * where we didn't reap any keys.
	 */
	kdebug("pass complete");

	if (gc_state & KEY_GC_SET_TIMER && new_timer != (time_t)LONG_MAX) {
		new_timer += key_gc_delay;
		key_schedule_gc(new_timer);
	}

	if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2) ||
	    !list_empty(&graveyard)) {
		/* Make sure that all pending keyring payload destructions are
		 * fulfilled and that people aren't now looking at dead or
		 * dying keys that they don't have a reference upon or a link
		 * to.
		 */
		kdebug("gc sync");
		synchronize_rcu();
	}

	if (!list_empty(&graveyard)) {
		kdebug("gc keys");
		key_gc_unused_keys(&graveyard);
	}

	if (unlikely(gc_state & (KEY_GC_REAPING_DEAD_1 |
				 KEY_GC_REAPING_DEAD_2))) {
		if (!(gc_state & KEY_GC_FOUND_DEAD_KEY)) {
			/* No remaining dead keys: short circuit the remaining
			 * keytype reap cycles.
			 */
			kdebug("dead short");
			gc_state &= ~(KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2);
			gc_state |= KEY_GC_REAPING_DEAD_3;
		} else {
			gc_state |= KEY_GC_REAP_AGAIN;
		}
	}

	if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3)) {
		/* the dead-keytype reap is finished; wake up whoever is
		 * waiting in key_gc_flags for the keytype to drain */
		kdebug("dead wake");
		smp_mb();
		clear_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags);
		wake_up_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE);
	}

	if (gc_state & KEY_GC_REAP_AGAIN)
		schedule_work(&key_gc_work);
	kleave(" [end %x]", gc_state);
	return;

	/* We found an unreferenced key - once we've removed it from the tree,
	 * we can safely drop the lock.
	 */
found_unreferenced_key:
	kdebug("unrefd key %d", key->serial);
	rb_erase(&key->serial_node, &key_serial_tree);
	spin_unlock(&key_serial_lock);

	list_add_tail(&key->graveyard_link, &graveyard);
	gc_state |= KEY_GC_REAP_AGAIN;
	goto maybe_resched;

	/* We found a keyring and we need to check the payload for links to
	 * dead or expired keys.  We don't flag another reap immediately as we
	 * have to wait for the old payload to be destroyed by RCU before we
	 * can reap the keys to which it refers.
	 */
found_keyring:
	spin_unlock(&key_serial_lock);
	kdebug("scan keyring %d", key->serial);
	key_gc_keyring(key, limit);
	goto maybe_resched;

	/* We found a dead key that is still referenced.  Reset its type and
	 * destroy its payload with its semaphore held.
	 */
destroy_dead_key:
	spin_unlock(&key_serial_lock);
	kdebug("destroy key %d", key->serial);
	down_write(&key->sem);
	key->type = &key_type_dead;
	if (key_gc_dead_keytype->destroy)
		key_gc_dead_keytype->destroy(key);
	memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
	up_write(&key->sem);
	goto maybe_resched;
}
/*
 * /proc/keys: print a one-line summary of a key, provided the caller has
 * VIEW permission on it (assessed assuming non-possession).
 */
static int proc_keys_show(struct seq_file *m, void *v)
{
	struct rb_node *_p = v;
	struct key *key = rb_entry(_p, struct key, serial_node);
	struct timespec now;
	unsigned long timo;
	char xbuf[12];		/* rendered expiry, e.g. "5m", "2d", "perm" */
	int rc;

	/* check whether the current task is allowed to view the key (assuming
	 * non-possession)
	 * - the caller holds a spinlock, and thus the RCU read lock, making our
	 *   access to __current_cred() safe
	 */
	rc = key_task_permission(make_key_ref(key, 0), current_cred(),
				 KEY_VIEW);
	if (rc < 0)
		return 0;	/* not viewable: silently omit the entry */

	now = current_kernel_time();

	rcu_read_lock();

	/* come up with a suitable timeout value */
	if (key->expiry == 0) {
		memcpy(xbuf, "perm", 5);	/* no expiry set */
	} else if (now.tv_sec >= key->expiry) {
		memcpy(xbuf, "expd", 5);	/* already expired */
	} else {
		timo = key->expiry - now.tv_sec;

		/* render remaining time in the largest unit that fits */
		if (timo < 60)
			sprintf(xbuf, "%lus", timo);
		else if (timo < 60*60)
			sprintf(xbuf, "%lum", timo / 60);
		else if (timo < 60*60*24)
			sprintf(xbuf, "%luh", timo / (60*60));
		else if (timo < 60*60*24*7)
			sprintf(xbuf, "%lud", timo / (60*60*24));
		else
			sprintf(xbuf, "%luw", timo / (60*60*24*7));
	}

#define showflag(KEY, LETTER, FLAG) \
	(test_bit(FLAG, &(KEY)->flags) ? LETTER : '-')

	seq_printf(m, "%08x %c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ",
		   key->serial,
		   showflag(key, 'I', KEY_FLAG_INSTANTIATED),
		   showflag(key, 'R', KEY_FLAG_REVOKED),
		   showflag(key, 'D', KEY_FLAG_DEAD),
		   showflag(key, 'Q', KEY_FLAG_IN_QUOTA),
		   showflag(key, 'U', KEY_FLAG_USER_CONSTRUCT),
		   showflag(key, 'N', KEY_FLAG_NEGATIVE),
		   atomic_read(&key->usage),
		   xbuf,
		   key->perm,
		   key->uid,
		   key->gid,
		   key->type->name);

#undef showflag

	/* let the key type append its own description */
	if (key->type->describe)
		key->type->describe(key, m);
	seq_putc(m, '\n');

	rcu_read_unlock();
	return 0;
}
/**
 * Transmit a packet.
 * This is a helper function for ctcm_tx().
 *
 *  ch		Channel to be used for sending.
 *  skb		Pointer to struct sk_buff of packet to send.
 *		The linklevel header has already been set up
 *		by ctcm_tx().
 *
 * returns 0 on success, -ERRNO on failure. (Never fails.)
 */
static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
{
	unsigned long saveflags;
	struct ll_header header;
	int rc = 0;
	__u16 block_len;
	int ccw_idx;
	struct sk_buff *nskb;
	unsigned long hi;

	/* we need to acquire the lock for testing the state
	 * otherwise we can have an IRQ changing the state to
	 * TXIDLE after the test but before acquiring the lock.
	 */
	spin_lock_irqsave(&ch->collect_lock, saveflags);
	if (fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) {
		int l = skb->len + LL_HEADER_LENGTH;

		/* channel busy: queue onto the collect queue unless that
		 * would overflow the collect buffer */
		if (ch->collect_len + l > ch->max_bufsize - 2) {
			spin_unlock_irqrestore(&ch->collect_lock, saveflags);
			return -EBUSY;
		} else {
			atomic_inc(&skb->users);
			header.length = l;
			header.type = skb->protocol;
			header.unused = 0;
			memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
			       LL_HEADER_LENGTH);
			skb_queue_tail(&ch->collect_queue, skb);
			ch->collect_len += l;
		}
		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
		goto done;
	}
	spin_unlock_irqrestore(&ch->collect_lock, saveflags);
	/*
	 * Protect skb against being free'd by upper
	 * layers.
	 */
	atomic_inc(&skb->users);
	ch->prof.txlen += skb->len;
	header.length = skb->len + LL_HEADER_LENGTH;
	header.type = skb->protocol;
	header.unused = 0;
	memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH);
	block_len = skb->len + 2;
	*((__u16 *)skb_push(skb, 2)) = block_len;
	/*
	 * IDAL support in CTCM is broken, so we have to
	 * care about skb's above 2G ourselves.
	 */
	hi = ((unsigned long)skb_tail_pointer(skb) + LL_HEADER_LENGTH) >> 31;
	if (hi) {
		/* data lies above 2G: copy it into a fresh DMA-able skb */
		nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!nskb) {
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		} else {
			memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
			atomic_inc(&nskb->users);
			atomic_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			skb = nskb;
		}
	}

	ch->ccw[4].count = block_len;
	if (set_normalized_cda(&ch->ccw[4], skb->data)) {
		/*
		 * idal allocation failed, try via copying to
		 * trans_skb. trans_skb usually has a pre-allocated
		 * idal.
		 */
		if (ctcm_checkalloc_buffer(ch)) {
			/*
			 * Remove our header. It gets added
			 * again on retransmit.
			 */
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		}

		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = skb->len;
		skb_copy_from_linear_data(skb,
				skb_put(ch->trans_skb, skb->len), skb->len);
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
		ccw_idx = 0;		/* use the trans_skb CCW chain */
	} else {
		skb_queue_tail(&ch->io_queue, skb);
		ccw_idx = 3;		/* use the direct-skb CCW chain */
	}
	ch->retry = 0;
	fsm_newstate(ch->fsm, CTC_STATE_TX);
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	ch->prof.send_stamp = current_kernel_time(); /* xtime */
	rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
			      (unsigned long)ch, 0xff, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (ccw_idx == 3)
		ch->prof.doios_single++;
	if (rc != 0) {
		/* I/O start failed: undo timer and queueing */
		fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "single skb TX");
		if (ccw_idx == 3)
			skb_dequeue_tail(&ch->io_queue);
		/*
		 * Remove our header. It gets added
		 * again on retransmit.
		 */
		skb_pull(skb, LL_HEADER_LENGTH + 2);
	} else if (ccw_idx == 0) {
		/* count the packet now: the copy in trans_skb is what goes
		 * out on the wire */
		struct net_device *dev = ch->netdev;
		struct ctcm_priv *priv = dev->ml_priv;
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
	}
done:
	ctcm_clear_busy(ch->netdev);
	return rc;
}
unsigned long long ras_ns(void) { struct timespec ts = current_kernel_time(); return ts.tv_sec * 1000000000ULL + ts.tv_nsec; }
uint64_t now(void) { struct timespec ts; ts = current_kernel_time(); return ts.tv_sec * 1000000000LL + ts.tv_nsec; }
/*
 * /proc/keys: print a one-line summary of a key, provided the caller has
 * VIEW permission on it.
 */
static int proc_keys_show(struct seq_file *m, void *v)
{
	const struct cred *cred = current_cred();
	struct rb_node *_p = v;
	struct key *key = rb_entry(_p, struct key, serial_node);
	struct timespec now;
	unsigned long timo;
	key_ref_t key_ref, skey_ref;
	char xbuf[12];		/* rendered expiry, e.g. "5m", "2d", "perm" */
	int rc;

	key_ref = make_key_ref(key, 0);

	/* determine if the key is possessed by this process (a test we can
	 * skip if the key does not indicate the possessor can view it)
	 */
	if (key->perm & KEY_POS_VIEW) {
		skey_ref = search_my_process_keyrings(key->type, key,
						      lookup_user_key_possessed,
						      true, cred);
		if (!IS_ERR(skey_ref)) {
			key_ref_put(skey_ref);
			key_ref = make_key_ref(key, 1);
		}
	}

	/* check whether the current task is allowed to view the key */
	rc = key_task_permission(key_ref, cred, KEY_VIEW);
	if (rc < 0)
		return 0;	/* not viewable: silently omit the entry */

	now = current_kernel_time();

	rcu_read_lock();

	/* come up with a suitable timeout value */
	if (key->expiry == 0) {
		memcpy(xbuf, "perm", 5);	/* no expiry set */
	} else if (now.tv_sec >= key->expiry) {
		memcpy(xbuf, "expd", 5);	/* already expired */
	} else {
		timo = key->expiry - now.tv_sec;

		/* render remaining time in the largest unit that fits */
		if (timo < 60)
			sprintf(xbuf, "%lus", timo);
		else if (timo < 60*60)
			sprintf(xbuf, "%lum", timo / 60);
		else if (timo < 60*60*24)
			sprintf(xbuf, "%luh", timo / (60*60));
		else if (timo < 60*60*24*7)
			sprintf(xbuf, "%lud", timo / (60*60*24));
		else
			sprintf(xbuf, "%luw", timo / (60*60*24*7));
	}

#define showflag(KEY, LETTER, FLAG) \
	(test_bit(FLAG, &(KEY)->flags) ? LETTER : '-')

	seq_printf(m, "%08x %c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ",
		   key->serial,
		   showflag(key, 'I', KEY_FLAG_INSTANTIATED),
		   showflag(key, 'R', KEY_FLAG_REVOKED),
		   showflag(key, 'D', KEY_FLAG_DEAD),
		   showflag(key, 'Q', KEY_FLAG_IN_QUOTA),
		   showflag(key, 'U', KEY_FLAG_USER_CONSTRUCT),
		   showflag(key, 'N', KEY_FLAG_NEGATIVE),
		   atomic_read(&key->usage),
		   xbuf,
		   key->perm,
		   key->uid,
		   key->gid,
		   key->type->name);

#undef showflag

	/* let the key type append its own description */
	if (key->type->describe)
		key->type->describe(key, m);
	seq_putc(m, '\n');

	rcu_read_unlock();
	return 0;
}
/* CLOCK_REALTIME_COARSE: return the cached, tick-granularity wall-clock
 * time; always succeeds. */
static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec *tp)
{
	struct timespec coarse = current_kernel_time();

	*tp = coarse;
	return 0;
}
/*
 * Garbage collector for unused keys.  Runs in process context off the
 * key_gc_work work item; repeats until a pass reaps nothing.
 */
static void key_garbage_collector(struct work_struct *work)
{
	static u8 gc_state;		/* Internal persistent state */
#define KEY_GC_REAP_AGAIN	0x01	/* - Need another cycle */
#define KEY_GC_REAPING_LINKS	0x02	/* - We need to reap links */
#define KEY_GC_SET_TIMER	0x04	/* - We need to restart the timer */
#define KEY_GC_REAPING_DEAD_1	0x10	/* - We need to mark dead keys */
#define KEY_GC_REAPING_DEAD_2	0x20	/* - We need to reap dead key links */
#define KEY_GC_REAPING_DEAD_3	0x40	/* - We need to reap dead keys */
#define KEY_GC_FOUND_DEAD_KEY	0x80	/* - We found at least one dead key */

	struct rb_node *cursor;
	struct key *key;
	time_t new_timer, limit;

	kenter("[%lx,%x]", key_gc_flags, gc_state);

	/* Only keys that expired more than key_gc_delay seconds ago are
	 * eligible; clamp so limit never goes below key_gc_delay. */
	limit = current_kernel_time().tv_sec;
	if (limit > key_gc_delay)
		limit -= key_gc_delay;
	else
		limit = key_gc_delay;

	/* Work out what we're going to be doing in this pass.  The two
	 * REAPING_DEAD bits advance one stage per pass (mark -> reap links ->
	 * reap keys). */
	gc_state &= KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2;
	gc_state <<= 1;
	if (test_and_clear_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags))
		gc_state |= KEY_GC_REAPING_LINKS | KEY_GC_SET_TIMER;

	if (test_and_clear_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags))
		gc_state |= KEY_GC_REAPING_DEAD_1;
	kdebug("new pass %x", gc_state);

	new_timer = LONG_MAX;

	/* As only this function is permitted to remove things from the key
	 * serial tree, if cursor is non-NULL then it will always point to a
	 * valid node in the tree - even if the lock got dropped. */
	spin_lock(&key_serial_lock);
	cursor = rb_first(&key_serial_tree);

continue_scanning:
	while (cursor) {
		key = rb_entry(cursor, struct key, serial_node);
		cursor = rb_next(cursor);

		if (atomic_read(&key->usage) == 0)
			goto found_unreferenced_key;

		if (unlikely(gc_state & KEY_GC_REAPING_DEAD_1)) {
			/* Stage 1: mark keys of the dying type dead and strip
			 * their permissions so no one can use them. */
			if (key->type == key_gc_dead_keytype) {
				gc_state |= KEY_GC_FOUND_DEAD_KEY;
				set_bit(KEY_FLAG_DEAD, &key->flags);
				key->perm = 0;
				goto skip_dead_key;
			}
		}

		if (gc_state & KEY_GC_SET_TIMER) {
			/* track the earliest not-yet-due expiry */
			if (key->expiry > limit && key->expiry < new_timer) {
				kdebug("will expire %x in %ld",
				       key_serial(key), key->expiry - limit);
				new_timer = key->expiry;
			}
		}

		if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2))
			if (key->type == key_gc_dead_keytype)
				gc_state |= KEY_GC_FOUND_DEAD_KEY;

		if ((gc_state & KEY_GC_REAPING_LINKS) ||
		    unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) {
			if (key->type == &key_type_keyring)
				goto found_keyring;
		}

		if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3))
			if (key->type == key_gc_dead_keytype)
				goto destroy_dead_key;

	skip_dead_key:
		if (spin_is_contended(&key_serial_lock) || need_resched())
			goto contended;
	}

contended:
	spin_unlock(&key_serial_lock);

maybe_resched:
	if (cursor) {
		cond_resched();
		spin_lock(&key_serial_lock);
		goto continue_scanning;
	}

	/* We've completed the pass.  Set the timer if we need to and queue a
	 * new cycle if necessary.  We keep executing cycles until we find one
	 * where we didn't reap any keys. */
	kdebug("pass complete");

	if (gc_state & KEY_GC_SET_TIMER && new_timer != (time_t)LONG_MAX) {
		new_timer += key_gc_delay;
		key_schedule_gc(new_timer);
	}

	if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) {
		/* Make sure everyone who might be looking at keys we've just
		 * marked dead has finished before we reap them. */
		kdebug("dead sync");
		synchronize_rcu();
	}

	if (unlikely(gc_state & (KEY_GC_REAPING_DEAD_1 |
				 KEY_GC_REAPING_DEAD_2))) {
		if (!(gc_state & KEY_GC_FOUND_DEAD_KEY)) {
			/* No remaining dead keys: short circuit the remaining
			 * keytype reap cycles. */
			kdebug("dead short");
			gc_state &= ~(KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2);
			gc_state |= KEY_GC_REAPING_DEAD_3;
		} else {
			gc_state |= KEY_GC_REAP_AGAIN;
		}
	}

	if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3)) {
		/* the dead-keytype reap is finished; wake up whoever is
		 * waiting on key_gc_flags for the keytype to drain */
		kdebug("dead wake");
		smp_mb();
		clear_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags);
		wake_up_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE);
	}

	if (gc_state & KEY_GC_REAP_AGAIN)
		queue_work(system_nrt_wq, &key_gc_work);
	kleave(" [end %x]", gc_state);
	return;

	/* We found an unreferenced key - once we've removed it from the tree,
	 * we can safely drop the lock. */
found_unreferenced_key:
	kdebug("unrefd key %d", key->serial);
	rb_erase(&key->serial_node, &key_serial_tree);
	spin_unlock(&key_serial_lock);

	key_gc_unused_key(key);
	gc_state |= KEY_GC_REAP_AGAIN;
	goto maybe_resched;

	/* We found a keyring and we need to check the payload for links to
	 * dead or expired keys. */
found_keyring:
	spin_unlock(&key_serial_lock);
	kdebug("scan keyring %d", key->serial);
	key_gc_keyring(key, limit);
	goto maybe_resched;

	/* We found a dead key that is still referenced.  Reset its type and
	 * destroy its payload with its semaphore held. */
destroy_dead_key:
	spin_unlock(&key_serial_lock);
	kdebug("destroy key %d", key->serial);
	down_write(&key->sem);
	key->type = &key_type_dead;
	if (key_gc_dead_keytype->destroy)
		key_gc_dead_keytype->destroy(key);
	memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
	up_write(&key->sem);
	goto maybe_resched;
}
/*
 * Request and configure the PMIC GPIO driving one of the external speaker
 * amplifiers (bottom / top / dock), then drive it high to enable the amp.
 *
 * spk_amp_gpio: one of BOTTOM_SPK_PAMP_GPIO, TOP_SPK_PAMP_GPIO or
 *		 DOCK_SPK_PAMP_GPIO; anything else is rejected with an error
 *		 log.
 *
 * BUGFIX: all pm8xxx_gpio_config() calls passed the mojibake token
 * "¶m" (an HTML-entity mangling of "&param"); restored to &param.
 */
static void msm8960_enable_ext_spk_amp_gpio(u32 spk_amp_gpio)
{
	int ret = 0;
	/* push-pull CMOS output, driven high, powered from S4 */
	struct pm_gpio param = {
		.direction = PM_GPIO_DIR_OUT,
		.output_buffer = PM_GPIO_OUT_BUF_CMOS,
		.output_value = 1,
		.pull = PM_GPIO_PULL_NO,
		.vin_sel = PM_GPIO_VIN_S4,
		.out_strength = PM_GPIO_STRENGTH_MED,
		.function = PM_GPIO_FUNC_NORMAL,
	};

	if (spk_amp_gpio == BOTTOM_SPK_PAMP_GPIO) {
		ret = gpio_request(BOTTOM_SPK_PAMP_GPIO, "BOTTOM_SPK_AMP");
		if (ret) {
			pr_aud_err("%s: Error requesting BOTTOM SPK AMP GPIO %u\n",
				__func__, BOTTOM_SPK_PAMP_GPIO);
			return;
		}
		ret = pm8xxx_gpio_config(BOTTOM_SPK_PAMP_GPIO, &param);
		if (ret)
			pr_aud_err("%s: Failed to configure Bottom Spk Ampl"
				" gpio %u\n", __func__, BOTTOM_SPK_PAMP_GPIO);
		else {
			pr_debug("%s: enable spkr amp gpio\n", __func__);
			gpio_direction_output(BOTTOM_SPK_PAMP_GPIO, 1);
		}

	} else if (spk_amp_gpio == TOP_SPK_PAMP_GPIO) {
		ret = gpio_request(TOP_SPK_PAMP_GPIO, "TOP_SPK_AMP");
		if (ret) {
			pr_aud_err("%s: Error requesting GPIO %d\n", __func__,
				TOP_SPK_PAMP_GPIO);
			return;
		}
		ret = pm8xxx_gpio_config(TOP_SPK_PAMP_GPIO, &param);
		if (ret)
			pr_aud_err("%s: Failed to configure Top Spk Ampl"
				" gpio %u\n", __func__, TOP_SPK_PAMP_GPIO);
		else {
			pr_debug("%s: enable hac amp gpio\n", __func__);
			gpio_direction_output(TOP_SPK_PAMP_GPIO, 1);
		}

	} else if (spk_amp_gpio == DOCK_SPK_PAMP_GPIO) {
		ret = gpio_request(DOCK_SPK_PAMP_GPIO, "DOCK_SPK_AMP");
		if (ret) {
			pr_aud_err("%s: Error requesting GPIO %d\n", __func__,
				DOCK_SPK_PAMP_GPIO);
			return;
		}
		ret = pm8xxx_gpio_config(DOCK_SPK_PAMP_GPIO, &param);
		if (ret)
			pr_aud_err("%s: Failed to configure Dock Spk Ampl"
				" gpio %u\n", __func__, DOCK_SPK_PAMP_GPIO);
		else {
			pr_debug("%s: enable dock amp gpio\n", __func__);
			gpio_direction_output(DOCK_SPK_PAMP_GPIO, 1);
		}
		/* the dock path additionally needs the USB ID ADC GPIO */
		ret = gpio_request(USB_ID_ADC_GPIO, "USB_ID_ADC");
		if (ret) {
			pr_aud_err("%s: Error requesting USB_ID_ADC PMIC GPIO %u\n",
				__func__, USB_ID_ADC_GPIO);
			return;
		}
		ret = pm8xxx_gpio_config(USB_ID_ADC_GPIO, &param);
		if (ret)
			pr_aud_err("%s: Failed to configure USB_ID_ADC PMIC"
				" gpio %u\n", __func__, USB_ID_ADC_GPIO);
	} else {
		pr_aud_err("%s: ERROR : Invalid External Speaker Ampl GPIO."
			" gpio = %u\n", __func__, spk_amp_gpio);
		return;
	}
}

/*
 * Turn on an external speaker amplifier pair (bottom, top/HAC or dock).
 * Tracks the POS/NEG halves in the msm8960_ext_*_spk_pamp bitmasks and
 * only enables the GPIO once both halves have been requested; sleeps 4 ms
 * afterwards to let the amplifier settle.
 *
 * Message typos fixed: "slepping" -> "sleeping"; missing space in
 * "already" "turned on" concatenations.
 */
static void msm8960_ext_spk_power_amp_on(u32 spk)
{
#ifdef CONFIG_AUDIO_USAGE_FOR_POWER_CONSUMPTION
	/* record speaker-on timestamp for power accounting */
	g_spk_flag = 1;
	g_spk_start_time = current_kernel_time();
#endif
	if (spk & (BOTTOM_SPK_AMP_POS | BOTTOM_SPK_AMP_NEG)) {
		if ((msm8960_ext_bottom_spk_pamp & BOTTOM_SPK_AMP_POS) &&
			(msm8960_ext_bottom_spk_pamp & BOTTOM_SPK_AMP_NEG)) {
			pr_debug("%s() External Bottom Speaker Ampl already "
				"turned on. spk = 0x%08x\n", __func__, spk);
			return;
		}

		msm8960_ext_bottom_spk_pamp |= spk;

		if ((msm8960_ext_bottom_spk_pamp & BOTTOM_SPK_AMP_POS) &&
			(msm8960_ext_bottom_spk_pamp & BOTTOM_SPK_AMP_NEG)) {
			msm8960_enable_ext_spk_amp_gpio(BOTTOM_SPK_PAMP_GPIO);
			pr_debug("%s: sleeping 4 ms after turning on external "
				" Speaker Ampl\n", __func__);
			usleep_range(4000, 4000);
		}

	} else if (spk & (TOP_SPK_AMP_POS | TOP_SPK_AMP_NEG)) {
		if ((msm8960_ext_top_spk_pamp & TOP_SPK_AMP_POS) &&
			(msm8960_ext_top_spk_pamp & TOP_SPK_AMP_NEG)) {
			pr_debug("%s() External Top Speaker Ampl already "
				"turned on. spk = 0x%08x\n", __func__, spk);
			return;
		}

		msm8960_ext_top_spk_pamp |= spk;

		if ((msm8960_ext_top_spk_pamp & TOP_SPK_AMP_POS) &&
			(msm8960_ext_top_spk_pamp & TOP_SPK_AMP_NEG)) {
			msm8960_enable_ext_spk_amp_gpio(TOP_SPK_PAMP_GPIO);
			pr_debug("%s: sleeping 4 ms after turning on "
				" external HAC Ampl\n", __func__);
			usleep_range(4000, 4000);
		}

	} else if (spk & (DOCK_SPK_AMP_POS | DOCK_SPK_AMP_NEG)) {
		/* dock path is also touched by notifier callbacks */
		mutex_lock(&audio_notifier_lock);
		if ((msm8960_ext_dock_spk_pamp & DOCK_SPK_AMP_POS) &&
			(msm8960_ext_dock_spk_pamp & DOCK_SPK_AMP_NEG)) {
			pr_debug("%s() External Dock Speaker Ampl already "
				"turned on. spk = 0x%08x\n", __func__, spk);
			return;
		}

		msm8960_ext_dock_spk_pamp |= spk;

		if ((msm8960_ext_dock_spk_pamp & DOCK_SPK_AMP_POS) &&
			(msm8960_ext_dock_spk_pamp & DOCK_SPK_AMP_NEG)) {
			msm8960_enable_ext_spk_amp_gpio(DOCK_SPK_PAMP_GPIO);
			pr_debug("%s: sleeping 4 ms after turning on "
				" external DOCK Ampl\n", __func__);
			usleep_range(4000, 4000);
		}
		mutex_unlock(&audio_notifier_lock);
	} else {
		pr_aud_err("%s: ERROR : Invalid External Speaker Ampl. spk = 0x%08x\n",
			__func__, spk);
		return;
	}
}
/*
 * Garbage collector for keys
 * - this involves scanning the keyrings for dead, expired and revoked keys
 *   that have overstayed their welcome
 */
static void key_garbage_collector(struct work_struct *work)
{
	struct rb_node *rb;
	key_serial_t cursor;
	struct key *key, *xkey;
	time_t new_timer = LONG_MAX, limit, now;

	now = current_kernel_time().tv_sec;
	kenter("[%x,%ld]", key_gc_cursor, key_gc_new_timer - now);

	/* only one collection run at a time; if one is already in progress,
	 * reschedule ourselves for a second's time */
	if (test_and_set_bit(0, &key_gc_executing)) {
		key_schedule_gc(current_kernel_time().tv_sec + 1);
		kleave(" [busy; deferring]");
		return;
	}

	/* Only keys that expired more than key_gc_delay seconds ago are
	 * eligible; clamp so limit never goes below key_gc_delay. */
	limit = now;
	if (limit > key_gc_delay)
		limit -= key_gc_delay;
	else
		limit = key_gc_delay;

	spin_lock(&key_serial_lock);

	if (unlikely(RB_EMPTY_ROOT(&key_serial_tree))) {
		/* nothing to scan */
		spin_unlock(&key_serial_lock);
		clear_bit(0, &key_gc_executing);
		return;
	}

	/* resume from where the last (interrupted) run left off */
	cursor = key_gc_cursor;
	if (cursor < 0)
		cursor = 0;
	if (cursor > 0)
		new_timer = key_gc_new_timer;
	else
		key_gc_again = false;

	/* find the first key above the cursor */
	key = NULL;
	rb = key_serial_tree.rb_node;
	while (rb) {
		xkey = rb_entry(rb, struct key, serial_node);
		if (cursor < xkey->serial) {
			key = xkey;
			rb = rb->rb_left;
		} else if (cursor > xkey->serial) {
			rb = rb->rb_right;
		} else {
			rb = rb_next(rb);
			if (!rb)
				goto reached_the_end;
			key = rb_entry(rb, struct key, serial_node);
			break;
		}
	}

	if (!key)
		goto reached_the_end;

	/* trawl through the keys looking for keyrings */
	for (;;) {
		/* track the earliest not-yet-due expiry for the next timer */
		if (key->expiry > limit && key->expiry < new_timer) {
			kdebug("will expire %x in %ld",
			       key_serial(key), key->expiry - limit);
			new_timer = key->expiry;
		}

		if (key->type == &key_type_keyring &&
		    key_gc_keyring(key, limit))
			/* the gc had to release our lock so that the keyring
			 * could be modified, so we have to get it again */
			goto gc_released_our_lock;

		rb = rb_next(&key->serial_node);
		if (!rb)
			goto reached_the_end;
		key = rb_entry(rb, struct key, serial_node);
	}

gc_released_our_lock:
	/* key_gc_keyring() dropped key_serial_lock and advanced the cursor;
	 * queue another run to continue from there */
	kdebug("gc_released_our_lock");
	key_gc_new_timer = new_timer;
	key_gc_again = true;
	clear_bit(0, &key_gc_executing);
	schedule_work(&key_gc_work);
	kleave(" [continue]");
	return;

	/* when we reach the end of the run, we set the timer for the next one */
reached_the_end:
	kdebug("reached_the_end");
	spin_unlock(&key_serial_lock);
	key_gc_new_timer = new_timer;
	key_gc_cursor = 0;
	clear_bit(0, &key_gc_executing);

	if (key_gc_again) {
		/* there may have been a key that expired whilst we were
		 * scanning, so if we discarded any links we should do another
		 * scan */
		new_timer = now + 1;
		key_schedule_gc(new_timer);
	} else if (new_timer < LONG_MAX) {
		new_timer += key_gc_delay;
		key_schedule_gc(new_timer);
	}
	kleave(" [end]");
}
/*
 * Low-memory-killer shrinker callback.
 *
 * Picks the process with the highest oom_adj at or above the threshold
 * selected from lowmem_adj[]/lowmem_minfree[] for the current free-page
 * level, and SIGKILLs it.  Returns the (approximate) number of reclaimable
 * pages remaining.
 *
 * Fixes: the PERFOMANCE_RESEARCH printks used %d for the long-typed
 * tv_sec/tv_nsec timespec fields (format/argument mismatch); the final
 * statistics printk lacked a terminating newline.
 */
static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	struct task_struct *p;
	struct task_struct *selected = NULL;
	int rem = 0;
	int tasksize;
	int i;
	int min_adj = OOM_ADJUST_MAX + 1;
	int selected_tasksize = 0;
	int selected_oom_adj;
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free = global_page_state(NR_FREE_PAGES);
//	int other_file = global_page_state(NR_FILE_PAGES);
	int other_file = global_page_state(NR_INACTIVE_FILE) +
			 global_page_state(NR_ACTIVE_FILE);

	/*
	 * If we already have a death outstanding, then
	 * bail out right away; indicating to vmscan
	 * that we have nothing further to offer on
	 * this pass.
	 */
#if PERFOMANCE_RESEARCH
	LMK_LastWorkNumberPossProcesses = 0;
	LMK_PreviousWorkingTime = LMK_CurrentWorkingTime;
	LMK_CurrentWorkingTime = current_kernel_time();
	/* tv_sec/tv_nsec are long: print with %ld, not %d */
	printk("LowMemoryKiller has started!!!\n Worked <%d> times before PreviosWorkingTime<%lds><%ldns>\n CurrentWorkingTime<%lds><%ldns>\n",
	       LMK_WorkCount++, LMK_PreviousWorkingTime.tv_sec,
	       LMK_PreviousWorkingTime.tv_nsec,
	       LMK_CurrentWorkingTime.tv_sec,
	       LMK_CurrentWorkingTime.tv_nsec);
#endif
	if (lowmem_deathpending) {
#if PERFOMANCE_RESEARCH
		printk("LowMemoryKiller is going to exit because of lowmem_deathpending\n");
#endif
		return 0;
	}

	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	/* pick the oom_adj threshold matching the current free-page level */
	for (i = 0; i < array_size; i++) {
#if 1
		if ((other_free + other_file) < lowmem_minfree[i])
#else
		if (other_free < lowmem_minfree[i] &&
		    other_file < lowmem_minfree[i])
#endif
		{
			min_adj = lowmem_adj[i];
			break;
		}
	}
	if (min_adj == OOM_ADJUST_MAX + 1)
		return 0;	/* plenty of memory: nothing to kill */

	if (nr_to_scan > 0)
		lowmem_print(3, "lowmem_shrink %d, %x, ofree %d %d, ma %d\n",
			     nr_to_scan, gfp_mask, other_free, other_file,
			     min_adj);
	rem = global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE) +
		global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE);
#if PERFOMANCE_RESEARCH
	printk("LowMemoryKiller semi-data: %d %x %d %d %d %d\n",
	       nr_to_scan, gfp_mask, other_free, other_file, min_adj, rem);
#endif
	if (nr_to_scan <= 0) {
		lowmem_print(5, "lowmem_shrink %d, %x, return %d\n",
			     nr_to_scan, gfp_mask, rem);
		return rem;
	}
	selected_oom_adj = min_adj;

	read_lock(&tasklist_lock);
	for_each_process(p) {
		struct mm_struct *mm;
		struct signal_struct *sig;
		int oom_adj;

		task_lock(p);
		mm = p->mm;
		sig = p->signal;
		if (!mm || !sig) {
			/* kernel thread or exiting task: skip */
			task_unlock(p);
			continue;
		}
		oom_adj = sig->oom_adj;
		if (oom_adj < min_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(mm);
		task_unlock(p);
		if (tasksize <= 0)
			continue;
		/* prefer higher oom_adj; break ties by larger RSS */
		if (selected) {
			if (oom_adj < selected_oom_adj)
				continue;
			if (oom_adj == selected_oom_adj &&
			    tasksize <= selected_tasksize)
				continue;
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_adj = oom_adj;
		lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
			     p->pid, p->comm, oom_adj, tasksize);
#if PERFOMANCE_RESEARCH
		LMK_LastWorkNumberPossProcesses++;
		printk("LowMemoryKiller has found process for killing: pid=%d name=%s with oom_adj=%d\n",
		       p->pid, p->comm, oom_adj);
#endif
	}
	if (selected) {
		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
			     selected->pid, selected->comm,
			     selected_oom_adj, selected_tasksize);
#if PERFOMANCE_RESEARCH
		printk("LowMemoryKiller is going to KILL: pid=%d name=%s with oom_adj=%d with size=%d (has chosen from %d)\n",
		       selected->pid, selected->comm, selected_oom_adj,
		       selected_tasksize, LMK_LastWorkNumberPossProcesses);
		LMK_TotalNUmberOfKilledProcesses++;
#endif
		lowmem_deathpending = selected;
		task_free_register(&task_nb);
		force_sig(SIGKILL, selected);
		rem -= selected_tasksize;
	}
	lowmem_print(4, "lowmem_shrink %d, %x, return %d\n",
		     nr_to_scan, gfp_mask, rem);
	read_unlock(&tasklist_lock);
#if PERFOMANCE_RESEARCH
	LMK_TotalFreedMem += selected_tasksize;
	/* added missing newline terminator */
	printk("LowMemoryKiller : rem=%d, totalNumber=%d, selected_tasksize=%d, Totalfreed=%d\n",
	       rem, LMK_TotalNUmberOfKilledProcesses, selected_tasksize,
	       LMK_TotalFreedMem);
#endif
	return rem;
}
struct timespec current_fs_time(struct super_block *sb) { struct timespec now = current_kernel_time(); return timespec_trunc(now, sb->s_time_gran); }
/*
 * Transmit a packet on a CTCM channel (helper for ctcm_tx(); the link-level
 * header has already been set up by the caller).
 * Returns 0 on success, -ERRNO on failure.
 */
static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
{
	unsigned long saveflags;
	struct ll_header header;
	int rc = 0;
	__u16 block_len;
	int ccw_idx;
	struct sk_buff *nskb;
	unsigned long hi;

	/* the lock must be held while testing the FSM state, otherwise an
	 * IRQ could switch it to TXIDLE between the test and the queueing */
	spin_lock_irqsave(&ch->collect_lock, saveflags);
	if (fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) {
		int l = skb->len + LL_HEADER_LENGTH;

		/* channel busy: queue onto the collect queue unless that
		 * would overflow the collect buffer */
		if (ch->collect_len + l > ch->max_bufsize - 2) {
			spin_unlock_irqrestore(&ch->collect_lock, saveflags);
			return -EBUSY;
		} else {
			atomic_inc(&skb->users);
			header.length = l;
			header.type = skb->protocol;
			header.unused = 0;
			memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
			       LL_HEADER_LENGTH);
			skb_queue_tail(&ch->collect_queue, skb);
			ch->collect_len += l;
		}
		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
		goto done;
	}
	spin_unlock_irqrestore(&ch->collect_lock, saveflags);
	/* protect the skb against being freed by upper layers */
	atomic_inc(&skb->users);
	ch->prof.txlen += skb->len;
	/* prepend the link-level header and the 2-byte block length */
	header.length = skb->len + LL_HEADER_LENGTH;
	header.type = skb->protocol;
	header.unused = 0;
	memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH);
	block_len = skb->len + 2;
	*((__u16 *)skb_push(skb, 2)) = block_len;
	/* data above the 2G line must be copied into a DMA-able buffer */
	hi = ((unsigned long)skb_tail_pointer(skb) + LL_HEADER_LENGTH) >> 31;
	if (hi) {
		nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!nskb) {
			/* undo refcount and header before failing */
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		} else {
			memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
			atomic_inc(&nskb->users);
			atomic_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			skb = nskb;
		}
	}

	ch->ccw[4].count = block_len;
	if (set_normalized_cda(&ch->ccw[4], skb->data)) {
		/* IDAL allocation failed: fall back to copying the data into
		 * the pre-allocated trans_skb */
		if (ctcm_checkalloc_buffer(ch)) {
			/* remove our header; it gets added again on
			 * retransmit */
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		}

		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = skb->len;
		skb_copy_from_linear_data(skb,
				skb_put(ch->trans_skb, skb->len), skb->len);
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
		ccw_idx = 0;		/* use the trans_skb CCW chain */
	} else {
		skb_queue_tail(&ch->io_queue, skb);
		ccw_idx = 3;		/* use the direct-skb CCW chain */
	}
	ch->retry = 0;
	fsm_newstate(ch->fsm, CTC_STATE_TX);
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	ch->prof.send_stamp = current_kernel_time();
	rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
			      (unsigned long)ch, 0xff, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (ccw_idx == 3)
		ch->prof.doios_single++;
	if (rc != 0) {
		/* I/O start failed: undo timer, queueing and header */
		fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "single skb TX");
		if (ccw_idx == 3)
			skb_dequeue_tail(&ch->io_queue);
		skb_pull(skb, LL_HEADER_LENGTH + 2);
	} else if (ccw_idx == 0) {
		/* trans_skb path: count stats now, the copy is on the wire */
		struct net_device *dev = ch->netdev;
		struct ctcm_priv *priv = dev->ml_priv;
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
	}
done:
	ctcm_clear_busy(ch->netdev);
	return rc;
}
unsigned long long ras_ms(void) { struct timespec ts = current_kernel_time(); return ts.tv_sec * 1000ULL + ras_div(ts.tv_nsec,1000000); }
//CAUTION!!!this function have to be called within a block of logger buffer lock, otherwise it might disorder log entry. void alog_ram_console_sync_time(const LogType log_type, const SyncType sync_type) { struct timespec now; LoggerEntry header; unsigned long flags; char tbuf[50]; unsigned tlen; unsigned long long t; unsigned long nanosec_rem; unsigned int prio = ANDROID_LOG_INFO; int i; // Div6-PT2-Core-BH-AlogRamConsole-02+ header.pid = current->tgid; header.tid = current->pid; spin_lock_irqsave(&bufflock, flags); // Div6-PT2-Core-BH-AlogRamConsole-02* now = current_kernel_time(); header.sec = now.tv_sec; header.nsec = now.tv_nsec; t = cpu_clock(smp_processor_id()); nanosec_rem = do_div(t, 1000000000); tlen = sprintf(tbuf, "[%5lu.%06lu] ", (unsigned long) t, nanosec_rem / 1000); header.len = tlen + 8; // Div6-PT2-Core-BH-AlogRamConsole-02*[ if (log_type == LOG_TYPE_ALL) { if (sync_type == SYNC_AFTER) printk(KERN_INFO "[LastAlog] alog_ram_console_sync_time %s for all log_type - After\n", tbuf); else printk(KERN_INFO "[LastAlog] alog_ram_console_sync_time %s for all log_type - Before\n", tbuf); for (i =LOG_TYPE_MAIN; i<LOG_TYPE_NUM; i++) { alog_ram_console_write_log(i, (char *)&header, (int)sizeof(LoggerEntry)); alog_ram_console_write_log(i, (char *)&prio, 1); if (sync_type == SYNC_AFTER) alog_ram_console_write_log(i, "SYNCA", 6); else alog_ram_console_write_log(i, "SYNCB", 6); alog_ram_console_write_log(i, tbuf, tlen+1); } } else { alog_ram_console_write_log(log_type, (char *)&header, (int)sizeof(LoggerEntry)); alog_ram_console_write_log(log_type, (char *)&prio, 1); if (sync_type == SYNC_AFTER) { printk(KERN_INFO "[LastAlog] alog_ram_console_sync_time %s for log_type:%d - After\n", tbuf, log_type); alog_ram_console_write_log(log_type, "SYNCA", 6); } else { printk(KERN_INFO "[LastAlog] alog_ram_console_sync_time %s for log_type:%d - Before\n", tbuf, log_type); alog_ram_console_write_log(log_type, "SYNCB", 6); } alog_ram_console_write_log(log_type, tbuf, tlen+1); } 
// Div6-PT2-Core-BH-AlogRamConsole-02*] spin_unlock_irqrestore(&bufflock, flags); }
/*
 * Disable the external speaker amplifier(s) selected by @spk.
 *
 * The BOTTOM/TOP paths drive their PA-enable GPIO low and release it.  The
 * DOCK path additionally parks the USB ID ADC GPIO as an input with the PMIC
 * configuration below, all under audio_notifier_lock.  Every power-down is
 * followed by a 4 ms settle delay.  An unknown @spk value is logged and
 * ignored.
 */
static void msm8960_ext_spk_power_amp_off(u32 spk)
{
	/* PMIC GPIO config used to park USB_ID_ADC_GPIO as a plain input. */
	struct pm_gpio param = {
		.direction = PM_GPIO_DIR_IN,
		.output_buffer = PM_GPIO_OUT_BUF_CMOS,
		.pull = PM_GPIO_PULL_NO,
		.vin_sel = PM_GPIO_VIN_S4,
		.out_strength = PM_GPIO_STRENGTH_MED,
		.function = PM_GPIO_FUNC_NORMAL,
	};

#ifdef CONFIG_AUDIO_USAGE_FOR_POWER_CONSUMPTION
	/* Accumulate how long the speaker PA was on, for power accounting. */
	g_spk_flag = 0;
	g_spk_end_time = current_kernel_time();
	g_spk_total_time += (g_spk_end_time.tv_sec - g_spk_start_time.tv_sec);
#endif

	if (spk & (BOTTOM_SPK_AMP_POS | BOTTOM_SPK_AMP_NEG)) {
		if (!msm8960_ext_bottom_spk_pamp)
			return;

		gpio_direction_output(BOTTOM_SPK_PAMP_GPIO, 0);
		gpio_free(BOTTOM_SPK_PAMP_GPIO);
		msm8960_ext_bottom_spk_pamp = 0;

		pr_debug("%s: sleeping 4 ms after turning off external"
			" Speaker Ampl\n", __func__);
		usleep_range(4000, 4000);
	} else if (spk & (TOP_SPK_AMP_POS | TOP_SPK_AMP_NEG)) {
		if (!msm8960_ext_top_spk_pamp)
			return;

		gpio_direction_output(TOP_SPK_PAMP_GPIO, 0);
		gpio_free(TOP_SPK_PAMP_GPIO);
		msm8960_ext_top_spk_pamp = 0;

		pr_debug("%s: sleeping 4 ms after turning off external"
			" HAC Ampl\n", __func__);
		usleep_range(4000, 4000);
	} else if (spk & (DOCK_SPK_AMP_POS | DOCK_SPK_AMP_NEG)) {
		mutex_lock(&audio_notifier_lock);
		if (!msm8960_ext_dock_spk_pamp) {
			mutex_unlock(&audio_notifier_lock);
			return;
		}

		gpio_direction_input(DOCK_SPK_PAMP_GPIO);
		gpio_free(DOCK_SPK_PAMP_GPIO);

		gpio_direction_input(USB_ID_ADC_GPIO);
		/* BUGFIX: was mojibake "¶m" (HTML entity residue of
		 * "&param"); pm8xxx_gpio_config() needs a pointer to the
		 * local pm_gpio configuration. */
		if (pm8xxx_gpio_config(USB_ID_ADC_GPIO, &param))
			pr_aud_err("%s: Failed to configure USB_ID_ADC PMIC"
				" gpio %u\n", __func__, USB_ID_ADC_GPIO);
		gpio_free(USB_ID_ADC_GPIO);
		msm8960_ext_dock_spk_pamp = 0;
		mutex_unlock(&audio_notifier_lock);

		pr_debug("%s: sleeping 4 ms after turning off external"
			" DOCK Ampl\n", __func__);
		usleep_range(4000, 4000);
	} else {
		pr_aud_err("%s: ERROR : Invalid Ext Spk Ampl. spk = 0x%08x\n",
			__func__, spk);
		return;
	}
}
/*
 * Enable the external speaker amplifier(s) selected by @spk.
 *
 * Each amp needs both its POS and NEG bits accumulated in the corresponding
 * *_pamp state word before the PA GPIO is actually driven high; on hardware
 * older than rev 3 the amp is additionally switched via set_*_amp().  A 4 ms
 * settle delay follows each power-up.  An unknown @spk value is logged and
 * ignored.
 */
static void msm8960_ext_spk_power_amp_on(u32 spk)
{
	pr_aud_info("%s: enable external amp %x\n", __func__, spk);

#ifdef CONFIG_AUDIO_USAGE_FOR_POWER_CONSUMPTION
	/* Record when the speaker PA went on, for power accounting. */
	g_spk_flag = 1;
	g_spk_start_time = current_kernel_time();
#endif

	if (spk & (BOTTOM_SPK_AMP_POS | BOTTOM_SPK_AMP_NEG)) {
		if ((msm8960_ext_bottom_spk_pamp & BOTTOM_SPK_AMP_POS) &&
			(msm8960_ext_bottom_spk_pamp & BOTTOM_SPK_AMP_NEG)) {
			pr_debug("%s() External Bottom Speaker Ampl already "
				"turned on. spk = 0x%08x\n", __func__, spk);
			return;
		}

		msm8960_ext_bottom_spk_pamp |= spk;

		/* Only power up once both POS and NEG have been requested. */
		if ((msm8960_ext_bottom_spk_pamp & BOTTOM_SPK_AMP_POS) &&
			(msm8960_ext_bottom_spk_pamp & BOTTOM_SPK_AMP_NEG)) {
			gpio_direction_output(TPA2051_PAMP_GPIO, 1);
			if (system_rev < 3)
				set_speaker_amp(1);
			/* BUGFIX: log message typo "slepping" -> "sleeping". */
			pr_debug("%s: sleeping 4 ms after turning on external "
				" Bottom Speaker Ampl\n", __func__);
			usleep_range(4000, 4000);
		}
	} else if (spk & (USB_EXT_AMP_POS | USB_EXT_AMP_NEG)) {
		if ((msm8960_ext_usb_aud_pamp & USB_EXT_AMP_POS) &&
			(msm8960_ext_usb_aud_pamp & USB_EXT_AMP_NEG)) {
			/* BUGFIX: adjacent literals used to concatenate to
			 * "alreadyturned on" — missing separator space. */
			pr_debug("%s() External USB Audio Ampl already "
				"turned on. spk = 0x%08x\n", __func__, spk);
			return;
		}

		msm8960_ext_usb_aud_pamp |= spk;

		/* Only power up once both POS and NEG have been requested. */
		if ((msm8960_ext_usb_aud_pamp & USB_EXT_AMP_POS) &&
			(msm8960_ext_usb_aud_pamp & USB_EXT_AMP_NEG)) {
			gpio_direction_output(TPA2051_PAMP_GPIO, 1);
			if (system_rev < 3)
				set_usb_audio_amp(1);
			pr_debug("%s: sleeping 4 ms after turning on "
				" external USB Audio Ampl\n", __func__);
			usleep_range(4000, 4000);
		}
	} else {
		pr_aud_err("%s: ERROR : Invalid External Speaker Ampl. spk = 0x%08x\n",
			__func__, spk);
		return;
	}
}
static int proc_keys_show(struct seq_file *m, void *v) { struct rb_node *_p = v; struct key *key = rb_entry(_p, struct key, serial_node); struct timespec now; unsigned long timo; char xbuf[12]; now = current_kernel_time(); rcu_read_lock(); /* come up with a suitable timeout value */ if (key->expiry == 0) { memcpy(xbuf, "perm", 5); } else if (now.tv_sec >= key->expiry) { memcpy(xbuf, "expd", 5); } else { timo = key->expiry - now.tv_sec; if (timo < 60) sprintf(xbuf, "%lus", timo); else if (timo < 60*60) sprintf(xbuf, "%lum", timo / 60); else if (timo < 60*60*24) sprintf(xbuf, "%luh", timo / (60*60)); else if (timo < 60*60*24*7) sprintf(xbuf, "%lud", timo / (60*60*24)); else sprintf(xbuf, "%luw", timo / (60*60*24*7)); } #define showflag(KEY, LETTER, FLAG) \ (test_bit(FLAG, &(KEY)->flags) ? LETTER : '-') seq_printf(m, "%08x %c%c%c%c%c%c %5d %4s %06x %5d %5d %-9.9s ", key->serial, showflag(key, 'I', KEY_FLAG_INSTANTIATED), showflag(key, 'R', KEY_FLAG_REVOKED), showflag(key, 'D', KEY_FLAG_DEAD), showflag(key, 'Q', KEY_FLAG_IN_QUOTA), showflag(key, 'U', KEY_FLAG_USER_CONSTRUCT), showflag(key, 'N', KEY_FLAG_NEGATIVE), atomic_read(&key->usage), xbuf, key->perm, key->uid, key->gid, key->type->name); #undef showflag if (key->type->describe) key->type->describe(key, m); seq_putc(m, '\n'); rcu_read_unlock(); return 0; }
static ulong current_msec(void) { struct timespec ts = current_kernel_time(); return ts.tv_sec * 1000 + ts.tv_nsec / 1000000; }
/*
 * Dump one HiFi DSP memory region (normal log, panic log, OCRAM image or TCM
 * image, selected by @index) to the file named in s_dsp_dump_info[index].
 *
 * Serialised by g_om_data.dsp_dump_sema.  hifi_get_log_signal() is held for
 * the duration of the dump (presumably to stop the DSP touching the buffers
 * — TODO confirm) and released before the semaphore is given back.  All
 * failures are logged and unwound through the END label.
 */
static void hifi_dump_dsp(DUMP_DSP_INDEX index)
{
	int ret = 0;
	mm_segment_t fs = 0;
	struct file *fp = NULL;
	int file_flag = O_RDWR;
	struct kstat file_stat;
	int write_size = 0;
	unsigned int err_no = 0xFFFFFFFF;
	char tmp_buf[64] = {0};
	unsigned long tmp_len = 0;
	struct rtc_time cur_tm;
	struct timespec now;

	char* file_name = s_dsp_dump_info[index].file_name;
	char* data_addr = NULL;
	unsigned int data_len = s_dsp_dump_info[index].data_len;

	char* is_panic = "i'm panic.\n";
	char* is_exception = "i'm exception.\n";
	char* not_panic = "i'm ok.\n";

	/* While a watchdog reset is pending, only the log dumps may proceed. */
	if ((index != NORMAL_LOG) && (index != PANIC_LOG) && g_om_data.is_watchdog_coming) {
		logi("watchdog is coming,so don't dump %s\n", file_name);
		return;
	}

	/* Honour the NV item that disables HiFi log saving entirely. */
	if (rdr_nv_get_value(RDR_NV_HIFI) != 1) {
		loge("do not save hifi log in nv config \n");
		return;
	}

	/* One dump at a time; bail out if interrupted while waiting. */
	if (down_interruptible(&g_om_data.dsp_dump_sema) < 0) {
		loge("acquire the semaphore error.\n");
		return;
	}

	IN_FUNCTION;

	hifi_get_log_signal();

	/* Map the DSP UART-to-memory log area (write-combined). */
	g_om_data.dsp_log_addr = (char*)ioremap_wc(DRV_DSP_UART_TO_MEM, DRV_DSP_UART_TO_MEM_SIZE);
	if (NULL == g_om_data.dsp_log_addr) {
		loge("dsp log ioremap_wc fail.\n");
		goto END;
	}

	/* Both log regions live past the reserved header of the mapping. */
	s_dsp_dump_info[NORMAL_LOG].data_addr = g_om_data.dsp_log_addr + DRV_DSP_UART_TO_MEM_RESERVE_SIZE;
	s_dsp_dump_info[PANIC_LOG].data_addr  = g_om_data.dsp_log_addr + DRV_DSP_UART_TO_MEM_RESERVE_SIZE;

	/* Binary image dumps need their own mapping of the backup areas. */
	if(index == OCRAM_BIN) {
		s_dsp_dump_info[index].data_addr = (unsigned char*)ioremap_wc(HIFI_OCRAM_BASE_ADDR, HIFI_IMAGE_OCRAMBAK_SIZE);
	}
	if(index == TCM_BIN) {
		s_dsp_dump_info[index].data_addr = (unsigned char*)ioremap_wc(HIFI_TCM_BASE_ADDR, HIFI_IMAGE_TCMBAK_SIZE);
	}

	if (NULL == s_dsp_dump_info[index].data_addr) {
		loge("dsp log ioremap_wc fail.\n");
		goto END;
	}

	data_addr = s_dsp_dump_info[index].data_addr;

	/* Allow vfs_* calls on kernel-space buffers. */
	fs = get_fs();
	set_fs(KERNEL_DS);

	/* Make sure the log directory hierarchy exists. */
	ret = hifi_create_dir(HIFI_LOG_PATH_PARENT);
	if (0 != ret) {
		goto END;
	}

	ret = hifi_create_dir(HIFI_LOG_PATH);
	if (0 != ret) {
		goto END;
	}

	/* Create the file only when it does not exist yet. */
	ret = vfs_stat(file_name, &file_stat);
	if (ret < 0) {
		logi("there isn't a dsp log file:%s, and need to create.\n", file_name);
		file_flag |= O_CREAT;
	}

	fp = filp_open(file_name, file_flag, 0664);
	if (IS_ERR(fp)) {
		loge("open file fail: %s.\n", file_name);
		fp = NULL;
		goto END;
	}

	/*write from file start*/
	vfs_llseek(fp, 0, SEEK_SET);

	/*write file head*/
	if (DUMP_DSP_LOG == s_dsp_dump_info[index].dump_type) {
		/*write dump log time*/
		now = current_kernel_time();
		rtc_time_to_tm(now.tv_sec, &cur_tm);

		memset(tmp_buf, 0, 64);
		tmp_len = sprintf(tmp_buf, "%04d-%02d-%02d %02d:%02d:%02d.\n",
							cur_tm.tm_year+1900, cur_tm.tm_mon+1,
							cur_tm.tm_mday, cur_tm.tm_hour,
							cur_tm.tm_min, cur_tm.tm_sec);
		vfs_write(fp, tmp_buf, tmp_len, &fp->f_pos);

		/*write exception no*/
		memset(tmp_buf, 0, 64);
		err_no = (unsigned int)(*(g_om_data.dsp_exception_no));
		if (err_no != 0xFFFFFFFF) {
			tmp_len = sprintf(tmp_buf, "the exception no: %u.\n", err_no);
		} else {
			tmp_len = sprintf(tmp_buf, "%s", "hifi is fine, just dump log.\n");
		}
		vfs_write(fp, tmp_buf, tmp_len, &fp->f_pos);

		/*write error type: marker word distinguishes panic/exception/ok*/
		if (0xdeadbeaf == *g_om_data.dsp_panic_mark) {
			vfs_write(fp, is_panic, strlen(is_panic), &fp->f_pos);
		} else if(0xbeafdead == *g_om_data.dsp_panic_mark){
			vfs_write(fp, is_exception, strlen(is_exception), &fp->f_pos);
		} else {
			vfs_write(fp, not_panic, strlen(not_panic), &fp->f_pos);
		}
	}

	/*write dsp info*/
	if((write_size = vfs_write(fp, data_addr, data_len, &fp->f_pos)) < 0) {
		loge("write file fail.\n");
	}

	logi("write file size: %d.\n", write_size);

END:
	/* Release everything acquired above, in reverse order. */
	if (fp) {
		filp_close(fp, 0);
	}

	set_fs(fs);

	if (NULL != g_om_data.dsp_log_addr) {
		iounmap(g_om_data.dsp_log_addr);
		g_om_data.dsp_log_addr = NULL;
	}

	if((index == OCRAM_BIN || index == TCM_BIN) && (NULL != s_dsp_dump_info[index].data_addr)) {
		iounmap(s_dsp_dump_info[index].data_addr);
		s_dsp_dump_info[index].data_addr = NULL;
	}

	hifi_release_log_signal();

	up(&g_om_data.dsp_dump_sema);

	OUT_FUNCTION;

	return;
}
/*
 * Panic-time dump to the eMMC crash partition: writes the console log, the
 * application-thread states and the kernel-thread states at 512/1024-byte
 * aligned offsets, then writes the panic_header (with those offsets/lengths)
 * at offset 0.  Bails out early if the eMMC isn't probeable or the crash
 * partition is already in use.
 */
static void apanic_mmc_logbuf_dump(void)
{
	struct apanic_data *ctx = &drv_ctx;
	struct panic_header *hdr = (struct panic_header *) ctx->bounce;
	int console_offset = 0;
	int console_len = 0;
	int threads_offset = 0;
	int threads_len = 0;
	int app_threads_offset = 0;
	int app_threads_len = 0;
	int rc = 0;
	struct timespec now;
	struct timespec uptime;
	struct rtc_time rtc_timestamp;
	struct console *con;

	if (!ctx->hd || !ctx->mmc_panic_ops ||
	    !ctx->mmc_panic_ops->panic_probe)
		return;
	if (ctx->mmc_panic_ops->panic_probe(ctx->hd,
					    ctx->mmc_panic_ops->type)) {
		pr_err("apanic: choose to use mmc, "
		       "but eMMC card not detected\n");
		return;
	}
	/* First 1024 bytes are reserved for the panic header. */
	console_offset = 1024;

	/* A previous, unread crash dump is still on the partition. */
	if (ctx->curr.magic) {
		pr_emerg("Crash partition in use!\n");
		return;
	}

	/*
	 * Add timestamp to displays current UTC time and uptime (in seconds).
	 */
	now = current_kernel_time();
	rtc_time_to_tm((unsigned long) now.tv_sec, &rtc_timestamp);
	do_posix_clock_monotonic_gettime(&uptime);
	bust_spinlocks(1);
	pr_emerg("Timestamp = %lu.%03lu\n",
	       (unsigned long) now.tv_sec,
	       (unsigned long) (now.tv_nsec / 1000000));
	pr_emerg("Current Time = "
	       "%02d-%02d %02d:%02d:%lu.%03lu, "
	       "Uptime = %lu.%03lu seconds\n",
	       rtc_timestamp.tm_mon + 1, rtc_timestamp.tm_mday,
	       rtc_timestamp.tm_hour, rtc_timestamp.tm_min,
	       (unsigned long) rtc_timestamp.tm_sec,
	       (unsigned long) (now.tv_nsec / 1000000),
	       (unsigned long) uptime.tv_sec,
	       (unsigned long) (uptime.tv_nsec / USEC_PER_SEC));
	bust_spinlocks(0);

	if (ctx->annotation)
		printk(KERN_EMERG "%s\n", ctx->annotation);

	/*
	 * Write out the console
	 */
	console_len = apanic_write_console_mmc(console_offset);
	if (console_len < 0) {
		pr_emerg("Error writing console to panic log! (%d)\n",
			 console_len);
		console_len = 0;
	}

	/*
	 * Write out all threads
	 */
	/* App threads start at the next 1024-byte boundary after the console
	 * (with a wrap-to-1024 guard when ALIGN yields 0). */
	app_threads_offset = (ALIGN(console_offset + console_len, 1024) == 0) ?
	    1024 : ALIGN(console_offset + console_len, 1024);

	/* Capture thread dumps through the log buffer: clear it, mute real
	 * consoles, dump, then flush the buffer to eMMC. */
	log_buf_clear();

	for (con = console_drivers; con; con = con->next)
		con->flags &= ~CON_ENABLED;

	ctx->buf_offset = app_threads_offset;
	ctx->written = app_threads_offset;
	start_apanic_threads = 1;
	if (get_trace_buf_size() < (SZ_512K + 1))
		ftrace_dump(1);
	show_state_filter(0, SHOW_APP_THREADS);
	ctx->buf_offset = ALIGN(ctx->written, 512);
	start_apanic_threads = 0;
	ctx->written += apanic_write_console_mmc(ctx->buf_offset);
	app_threads_len = ctx->written - app_threads_offset;

	/* Same procedure for kernel threads at the next 512-byte boundary. */
	log_buf_clear();
	threads_offset = ALIGN(ctx->written, 512);
	ctx->buf_offset = threads_offset;
	ctx->written = threads_offset;
	start_apanic_threads = 1;
	show_state_filter(0, SHOW_KTHREADS);
	show_cpu_current_stack_mem();
	start_apanic_threads = 0;
	ctx->buf_offset = ALIGN(ctx->written, 512);
	ctx->written += apanic_write_console_mmc(ctx->buf_offset);
	threads_len = ctx->written - threads_offset + 512;

	/* Re-enable the real consoles. */
	for (con = console_drivers; con; con = con->next)
		con->flags |= CON_ENABLED;

	/*
	 * Finally write the panic header
	 */
	memset(ctx->bounce, 0, PAGE_SIZE);
	hdr->magic = PANIC_MAGIC;
	hdr->version = PHDR_VERSION;

	hdr->console_offset = console_offset;
	hdr->console_length = console_len;

	hdr->app_threads_offset = app_threads_offset;
	hdr->app_threads_length = app_threads_len;

	hdr->threads_offset = threads_offset;
	hdr->threads_length = threads_len;

	rc = ctx->mmc_panic_ops->panic_write(ctx->hd, ctx->bounce, 0,
					     console_offset);
	if (rc <= 0) {
		pr_emerg("apanic: Header write failed (%d)\n", rc);
		return;
	}

	pr_emerg("apanic: Panic dump successfully written\n");
}
/*
 * Module init for the bcm_fuse_ipc character device.
 *
 * Registers the char device, creates the interrupt workqueue and IRQ, maps
 * the AP/CP shared memory uncached (CP has to observe our writes
 * immediately), initialises the IPC layer and then — unless this is an
 * AP-only boot — polls until CP reports its side of the IPC is ready.
 *
 * Returns 0 on success or a negative error code.  On failure everything
 * acquired so far is released in reverse order (the original code leaked the
 * workqueue, IRQ and shmem mapping, and a create_workqueue failure even
 * returned 0 while skipping cdev_del).
 */
static int __init ipcs_module_init(void)
{
	int rc = 0;
	int readyChkCnt = 0;
	struct timespec startTime, endTime;

	IPC_DEBUG(DBG_INFO, "[ipc]: ipcs_module_init start..\n");

	init_MUTEX_LOCKED(&g_ipc_info.ipc_sem);
	g_ipc_info.ipc_state = 0;

	g_ipc_info.devnum = MKDEV(IPC_MAJOR, 0);
	rc = register_chrdev_region(g_ipc_info.devnum, 1, "bcm_fuse_ipc");
	if (rc < 0) {
		IPC_DEBUG(DBG_ERROR, "Error registering the IPC device\n");
		goto out;
	}

	cdev_init(&g_ipc_info.cdev, &ipc_ops);
	g_ipc_info.cdev.owner = THIS_MODULE;
	rc = cdev_add(&g_ipc_info.cdev, g_ipc_info.devnum, 1);
	if (rc) {
		IPC_DEBUG(DBG_ERROR, "[ipc]: cdev_add error\n");
		goto out_unregister;
	}

	IPC_DEBUG(DBG_INFO, "[ipc]: create_workqueue\n");
	INIT_WORK(&g_ipc_info.cp_crash_dump_wq, ProcessCPCrashedDump);
	INIT_WORK(&g_ipc_info.intr_work, ipcs_intr_workqueue_process);
	g_ipc_info.intr_workqueue = create_workqueue("ipc-wq");
	if (!g_ipc_info.intr_workqueue) {
		/* BUGFIX: used to "goto out_unregister" with rc still 0 —
		 * the failed init both leaked the cdev and reported success. */
		rc = -ENOMEM;
		IPC_DEBUG(DBG_ERROR, "[ipc]: cannot create workqueue\n");
		goto out_del;
	}

	IPC_DEBUG(DBG_INFO, "[ipc]: request_irq\n");
	rc = request_irq(IRQ_IPC_C2A, ipcs_interrupt, IRQF_NO_SUSPEND,
			 "ipc-intr", &g_ipc_info);
	if (rc) {
		IPC_DEBUG(DBG_ERROR, "[ipc]: request_irq error\n");
		goto out_wq;
	}

	/*
	 * Make sure this is not cache'd because CP has to know about any
	 * changes we write to this memory immediately.
	 */
	IPC_DEBUG(DBG_INFO, "[ipc]: ioremap_nocache IPC_BASE\n");
	g_ipc_info.apcp_shmem = ioremap_nocache(IPC_BASE, IPC_SIZE);
	if (!g_ipc_info.apcp_shmem) {
		rc = -ENOMEM;
		IPC_DEBUG(DBG_ERROR, "[ipc]: Could not map shmem\n");
		goto out_irq;
	}

#ifdef CONFIG_HAS_WAKELOCK
	/* NOTE(review): deliberately not destroyed on the failure paths
	 * below — the crash-dump work scheduled further down may still be
	 * holding this lock. */
	wake_lock_init(&ipc_wake_lock, WAKE_LOCK_SUSPEND, "ipc_wake_lock");
#endif

	IPC_DEBUG(DBG_INFO, "[ipc]: ipcs_init\n");
	if (ipcs_init((void *)g_ipc_info.apcp_shmem, IPC_SIZE)) {
		rc = -1;
		IPC_DEBUG(DBG_ERROR, "[ipc]: ipcs_init() failed\n");
		goto out_unmap;
	}

	if (sEarlyCPInterrupt) {
		IPC_DEBUG(DBG_INFO, "[ipc]: early CP interrupt - doing crash dump...\n");
#ifdef CONFIG_HAS_WAKELOCK
		wake_lock(&ipc_wake_lock);
#endif
		schedule_work(&g_ipc_info.cp_crash_dump_wq);
	}

	/* check for AP only boot mode */
	if (AP_ONLY_BOOT == get_ap_boot_mode()) {
		IPC_DEBUG(DBG_INFO, "[ipc]: AP only boot - not waiting for CP\n");
	} else {
		/* Wait for CP to have IPC setup as well; if we exit module
		 * init before IPC is ready, RPC module will likely crash
		 * during its own init. */
		startTime = current_kernel_time();
		while (!g_ipc_info.ipc_state) {
			IPC_DEBUG(DBG_INFO, "[ipc]: CP IPC not ready, sleeping...\n");
			msleep(20);
			readyChkCnt++;
			/* Give CP at most ~2 s (100 * 20 ms) to come up. */
			if (readyChkCnt > 100) {
				IPC_DEBUG(DBG_ERROR, "[ipc]: IPC init timeout - no response from CP\n");
				rc = -1;
				goto out_unmap;
			}
		}
		endTime = current_kernel_time();
		IPC_DEBUG(DBG_INFO, "readyChkCnt=%d time=%ldus\n", readyChkCnt,
			  ((endTime.tv_sec - startTime.tv_sec) * 1000000L +
			   (endTime.tv_nsec - startTime.tv_nsec) / 1000L));
		IPC_DEBUG(DBG_INFO, "[ipc]: ipcs_module_init ok\n");
	}

	return 0;

	/* Unwind in reverse order of acquisition (BUGFIX: the workqueue, IRQ
	 * and shmem mapping were previously leaked on failure). */
out_unmap:
	iounmap(g_ipc_info.apcp_shmem);
out_irq:
	free_irq(IRQ_IPC_C2A, &g_ipc_info);
out_wq:
	destroy_workqueue(g_ipc_info.intr_workqueue);
out_del:
	cdev_del(&g_ipc_info.cdev);
out_unregister:
	unregister_chrdev_region(g_ipc_info.devnum, 1);
out:
	IPC_DEBUG(DBG_ERROR, "IPC Driver Failed to initialise!\n");
	return rc;
}