int do_getitimer(int which, struct itimerval *value) { register unsigned long val, interval; switch (which) { case ITIMER_REAL: interval = current->it_real_incr; val = 0; /* * FIXME! This needs to be atomic, in case the kernel timer happens! */ if (timer_pending(¤t->real_timer)) { val = current->real_timer.expires - jiffies; /* look out for negative/zero itimer.. */ if ((long) val <= 0) val = 1; } break; case ITIMER_VIRTUAL: val = current->it_virt_value; interval = current->it_virt_incr; break; case ITIMER_PROF: val = current->it_prof_value; interval = current->it_prof_incr; break; default: return(-EINVAL); } jiffies_to_timeval(val, &value->it_value); jiffies_to_timeval(interval, &value->it_interval); return 0; }
/*
 * mdp_pipe_kickoff - start one MDP engine (PPP, DMA2, or DMA_S).
 * @term: which engine to start (MDP_PPP_TERM / MDP_DMA2_TERM / MDP_DMA_S_TERM).
 * @mfd:  framebuffer device data (not used in this path; kept for the API).
 *
 * The PPP path powers the block on, writes the start register and sleeps
 * (interruptibly) until the PPP-done IRQ completes mdp_ppp_comp.  The DMA
 * paths only poke the start register and return immediately.
 */
void mdp_pipe_kickoff(uint32 term, struct msm_fb_data_type *mfd)
{
	/* kick off PPP engine */
	if (term == MDP_PPP_TERM) {
		if (mdp_debug[MDP_PPP_BLOCK]) {
			/* timestamp the kickoff for latency debugging */
			jiffies_to_timeval(jiffies, &mdp_ppp_timeval);
		}

		INIT_COMPLETION(mdp_ppp_comp);
		mdp_ppp_waiting = TRUE;

		/* let's turn on PPP block */
		mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_ON, FALSE);

		/* HWIO_OUT(MDP_DISPLAY0_START, 0x1000); */
		outpdw(MDP_BASE + 0x30, 0x1000);
		wait_for_completion_interruptible(&mdp_ppp_comp);

		if (mdp_debug[MDP_PPP_BLOCK]) {
			struct timeval now;

			jiffies_to_timeval(jiffies, &now);
			/* NOTE(review): only tv_usec is differenced, so the
			 * reported delta wraps around every second */
			mdp_ppp_timeval.tv_usec =
			    now.tv_usec - mdp_ppp_timeval.tv_usec;
			MSM_FB_INFO("MDP-PPP: %d\n",
				    (int)mdp_ppp_timeval.tv_usec);
		}
	} else if (term == MDP_DMA2_TERM) {
		if (mdp_debug[MDP_DMA2_BLOCK]) {
			MSM_FB_INFO("MDP-DMA2: %d\n",
				    (int)mdp_dma2_timeval.tv_usec);
			jiffies_to_timeval(jiffies, &mdp_dma2_timeval);
		}
		/* DMA update timestamp */
		mdp_dma2_last_update_time = ktime_get_real();
		/* let's turn on DMA2 block */
		/* mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON, FALSE); */
#ifdef CONFIG_FB_MSM_MDP22
		outpdw(MDP_CMD_DEBUG_ACCESS_BASE + 0x0044, 0x0); /* start DMA */
#else
		outpdw(MDP_BASE + 0x0044, 0x0); /* start DMA */
#endif
	} else if (term == MDP_DMA_S_TERM) {
		mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0048, 0x0); /* start DMA */
	}
}
/*
 * blktap_ring_debug - format the ring's pending-request table into @buf.
 * @tap:  the tap device whose ring is inspected.
 * @buf:  destination buffer for the human-readable dump.
 * @size: capacity of @buf; output is truncated to fit.
 *
 * Returns the number of characters written (not counting the NUL).
 *
 * Fix: the per-request age was printed with "%lu.%09lu", padding tv_usec
 * to nine digits as if it were nanoseconds; struct timeval's tv_usec is
 * microseconds, so the correct zero-padded width is %06lu.
 */
size_t blktap_ring_debug(struct blktap *tap, char *buf, size_t size)
{
	struct blktap_ring *ring = &tap->ring;
	char *s = buf, *end = buf + size;
	int usr_idx;

	s += snprintf(s, end - s, "begin pending:%d\n", ring->n_pending);

	for (usr_idx = 0; usr_idx < BLKTAP_RING_SIZE; usr_idx++) {
		struct blktap_request *request;
		struct timeval t;

		request = ring->pending[usr_idx];
		if (!request)
			continue;

		/* age of the request, as seconds.microseconds */
		jiffies_to_timeval(jiffies - request->rq->start_time, &t);

		s += snprintf(s, end - s,
			      "%02d: usr_idx:%02d "
			      "op:%x nr_pages:%02d time:%lu.%06lu\n",
			      usr_idx, request->usr_idx,
			      request->operation, request->nr_pages,
			      t.tv_sec, t.tv_usec);
	}

	s += snprintf(s, end - s, "end pending\n");

	return s - buf;
}
/*
 * lnet_sock_write - send @nob bytes from @buffer on @sock, retrying until
 * everything is sent or the @timeout budget (in seconds) is exhausted.
 *
 * A zero @timeout means a single non-blocking attempt (MSG_DONTWAIT).
 * Returns 0 on success, -EAGAIN when time runs out, -ECONNABORTED on an
 * unexpected zero-length send, or a negative errno from the socket layer.
 */
int lnet_sock_write(struct socket *sock, void *buffer, int nob, int timeout)
{
	int rc;
	long jiffies_left = timeout * msecs_to_jiffies(MSEC_PER_SEC);
	unsigned long then;
	struct timeval tv;
	struct kvec iov = {
		.iov_base = buffer,
		.iov_len = nob
	};
	struct msghdr msg = {NULL,};

	LASSERT(nob > 0);
	/*
	 * Caller may pass a zero timeout if she thinks the socket buffer is
	 * empty enough to take the whole message immediately
	 */
	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, nob);

	for (;;) {
		/* zero timeout => one non-blocking attempt only */
		msg.msg_flags = !timeout ? MSG_DONTWAIT : 0;

		if (timeout) {
			/* Set send timeout to remaining time */
			jiffies_to_timeval(jiffies_left, &tv);
			rc = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO,
					       (char *)&tv, sizeof(tv));
			if (rc) {
				CERROR("Can't set socket send timeout %ld.%06d: %d\n",
				       (long)tv.tv_sec, (int)tv.tv_usec, rc);
				return rc;
			}
		}

		then = jiffies;
		rc = kernel_sendmsg(sock, &msg, &iov, 1, nob);
		/* charge the elapsed time against the remaining budget */
		jiffies_left -= jiffies - then;

		if (rc < 0)
			return rc;

		if (!rc) {
			CERROR("Unexpected zero rc\n");
			return -ECONNABORTED;
		}

		/* msg.msg_iter tracks progress across partial sends */
		if (!msg_data_left(&msg))
			break;

		if (jiffies_left <= 0)
			return -EAGAIN;
	}
	return 0;
}
static void l3g4200d_report_data(struct l3g4200d_data *gyro) { u8 buf[6]; s16 hw_d[3]; s16 x, y, z; int ret; ret = i2c_smbus_read_i2c_block_data(gyro->client, AUTO_INCREMENT | AXISDATA_REG, sizeof(buf), &buf[0]); if (ret < 0) { dev_err(&gyro->client->dev, "Failed to read axis data.\n"); return; } hw_d[0] = (s16) ((buf[1] << 8) | buf[0]); hw_d[1] = (s16) ((buf[3] << 8) | buf[2]); hw_d[2] = (s16) ((buf[5] << 8) | buf[4]); x = ((gyro->pdata->negate_x) ? (-hw_d[gyro->pdata->axis_map_x]) : (hw_d[gyro->pdata->axis_map_x])); y = ((gyro->pdata->negate_y) ? (-hw_d[gyro->pdata->axis_map_y]) : (hw_d[gyro->pdata->axis_map_y])); z = ((gyro->pdata->negate_z) ? (-hw_d[gyro->pdata->axis_map_z]) : (hw_d[gyro->pdata->axis_map_z])); #ifdef DEBUG { struct timeval now; jiffies_to_timeval(jiffies, &now); dev_dbg(&gyro->client->dev, "%ld.%ld: X=%d, Y=%d, Z=%d", now.tv_sec, now.tv_usec, (int) x, (int) y, (int) z); } #endif input_report_rel(gyro->input_dev, REL_X, x); input_report_rel(gyro->input_dev, REL_Y, y); input_report_rel(gyro->input_dev, REL_Z, z); input_sync(gyro->input_dev); }
void acct_update_integrals(struct task_struct *tsk) { if (likely(tsk->mm)) { cputime_t time, dtime; struct timeval value; unsigned long flags; u64 delta; local_irq_save(flags); time = tsk->stime + tsk->utime; dtime = time - tsk->acct_timexpd; jiffies_to_timeval(cputime_to_jiffies(dtime), &value); delta = value.tv_sec; delta = delta * USEC_PER_SEC + value.tv_usec; if (delta == 0) goto out; tsk->acct_timexpd = time; tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm); tsk->acct_vm_mem1 += delta * tsk->mm->total_vm; out: local_irq_restore(flags); } }
/*
 * getrusage - fill in and copy out a struct rusage for task @p.
 * @p:   task whose counters are reported.
 * @who: RUSAGE_SELF, RUSAGE_CHILDREN, or anything else for self+children.
 * @ru:  user-space destination.
 *
 * It would make sense to put struct rusage in the task_struct, except
 * that would make the task_struct be *really big*.  After task_struct
 * gets moved into malloc'ed memory, it would make sense to do this.
 *
 * SMP safety (by design, not locking): either we are called from
 * sys_getrusage on ourselves (only we mutate our own counters), or from
 * wait4() on a stopped/zombied task whose counters are frozen — in the
 * zombie case the task is not reaped until shortly after getrusage()
 * returns — so the fields read below cannot change underneath us.
 *
 * Returns 0 on success, -EFAULT if the copy to user space fails.
 */
int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
	struct rusage usage;

	memset(&usage, 0, sizeof(usage));

	switch (who) {
	case RUSAGE_CHILDREN:
		/* reaped children only */
		jiffies_to_timeval(p->cutime, &usage.ru_utime);
		jiffies_to_timeval(p->cstime, &usage.ru_stime);
		usage.ru_nvcsw = p->cnvcsw;
		usage.ru_nivcsw = p->cnivcsw;
		usage.ru_minflt = p->cmin_flt;
		usage.ru_majflt = p->cmaj_flt;
		usage.ru_nswap = p->cnswap;
		break;
	case RUSAGE_SELF:
		/* this task only */
		jiffies_to_timeval(p->utime, &usage.ru_utime);
		jiffies_to_timeval(p->stime, &usage.ru_stime);
		usage.ru_nvcsw = p->nvcsw;
		usage.ru_nivcsw = p->nivcsw;
		usage.ru_minflt = p->min_flt;
		usage.ru_majflt = p->maj_flt;
		usage.ru_nswap = p->nswap;
		break;
	default:
		/* self plus reaped children */
		jiffies_to_timeval(p->utime + p->cutime, &usage.ru_utime);
		jiffies_to_timeval(p->stime + p->cstime, &usage.ru_stime);
		usage.ru_nvcsw = p->nvcsw + p->cnvcsw;
		usage.ru_nivcsw = p->nivcsw + p->cnivcsw;
		usage.ru_minflt = p->min_flt + p->cmin_flt;
		usage.ru_majflt = p->maj_flt + p->cmaj_flt;
		usage.ru_nswap = p->nswap + p->cnswap;
		break;
	}

	if (copy_to_user(ru, &usage, sizeof(usage)))
		return -EFAULT;
	return 0;
}
/*
 * mdp_pipe_kickoff - start one MDP engine (PPP, DMA2/S/E, OVERLAY0/1).
 * @term: engine selector (MDP_*_TERM).
 * @mfd:  framebuffer device data (unused here; kept for the caller's API).
 *
 * The PPP path enables its IRQ, writes the start register and blocks
 * (killably) on mdp_ppp_comp until the done interrupt fires.  All other
 * paths just power the block on and poke its start register.  Register
 * offsets vary by MDP generation (CONFIG_FB_MSM_MDP22 / MDP40).
 */
void mdp_pipe_kickoff(uint32 term, struct msm_fb_data_type *mfd)
{
	DISP_LOCAL_LOG_EMERG("DISP mdp_pipe_kickoff S\n");
	/* complete all the writes before starting */
	wmb();
	/* kick off PPP engine */
	if (term == MDP_PPP_TERM) {
		if (mdp_debug[MDP_PPP_BLOCK])
			jiffies_to_timeval(jiffies, &mdp_ppp_timeval);
		/* let's turn on PPP block */
		mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_enable_irq(term);
		INIT_COMPLETION(mdp_ppp_comp);
		mdp_ppp_waiting = TRUE;
		outpdw(MDP_BASE + 0x30, 0x1000);
		wait_for_completion_killable(&mdp_ppp_comp);
		mdp_disable_irq(term);
		if (mdp_debug[MDP_PPP_BLOCK]) {
			struct timeval now;

			jiffies_to_timeval(jiffies, &now);
			/* NOTE(review): only tv_usec is differenced; the
			 * reported delta wraps every second */
			mdp_ppp_timeval.tv_usec =
			    now.tv_usec - mdp_ppp_timeval.tv_usec;
			MSM_FB_DEBUG("MDP-PPP: %d\n",
				     (int)mdp_ppp_timeval.tv_usec);
		}
	} else if (term == MDP_DMA2_TERM) {
		if (mdp_debug[MDP_DMA2_BLOCK]) {
			MSM_FB_DEBUG("MDP-DMA2: %d\n",
				     (int)mdp_dma2_timeval.tv_usec);
			jiffies_to_timeval(jiffies, &mdp_dma2_timeval);
		}
		/* DMA update timestamp */
		mdp_dma2_last_update_time = ktime_get_real();
		/* let's turn on DMA2 block */
#if 0
		mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
#endif
#ifdef CONFIG_FB_MSM_MDP22
		outpdw(MDP_CMD_DEBUG_ACCESS_BASE + 0x0044, 0x0);/* start DMA */
#else
		mdp_lut_enable();
#ifdef CONFIG_FB_MSM_MDP40
		outpdw(MDP_BASE + 0x000c, 0x0);	/* start DMA */
#else
		outpdw(MDP_BASE + 0x0044, 0x0);	/* start DMA */
#endif
#endif
#ifdef CONFIG_FB_MSM_MDP40
	} else if (term == MDP_DMA_S_TERM) {
		mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0010, 0x0);	/* start DMA */
	} else if (term == MDP_DMA_E_TERM) {
		mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0014, 0x0);	/* start DMA */
	} else if (term == MDP_OVERLAY0_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_lut_enable();
		outpdw(MDP_BASE + 0x0004, 0);
	} else if (term == MDP_OVERLAY1_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_lut_enable();
		outpdw(MDP_BASE + 0x0008, 0);
	}
#else
	}
/* NOTE(review): this function appears truncated in this chunk — the
 * "#ifdef CONFIG_FB_MSM_MDP40" block above has no visible matching
 * "#endif" (and the non-MDP40 #else arm looks incomplete).  Verify
 * against the full source before building. */
/* Actual dumper. * * This is a two-pass process; first we find the offsets of the bits, * and then they are actually written out. If we run out of core limit * we just truncate. */ static int irix_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit) { int has_dumped = 0; mm_segment_t fs; int segs; int i; size_t size; struct vm_area_struct *vma; struct elfhdr elf; off_t offset = 0, dataoff; int numnote = 3; struct memelfnote notes[3]; struct elf_prstatus prstatus; /* NT_PRSTATUS */ elf_fpregset_t fpu; /* NT_PRFPREG */ struct elf_prpsinfo psinfo; /* NT_PRPSINFO */ /* Count what's needed to dump, up to the limit of coredump size. */ segs = 0; size = 0; for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) { if (maydump(vma)) { int sz = vma->vm_end-vma->vm_start; if (size+sz >= limit) break; else size += sz; } segs++; } pr_debug("irix_core_dump: %d segs taking %d bytes\n", segs, size); /* Set up header. */ memcpy(elf.e_ident, ELFMAG, SELFMAG); elf.e_ident[EI_CLASS] = ELFCLASS32; elf.e_ident[EI_DATA] = ELFDATA2LSB; elf.e_ident[EI_VERSION] = EV_CURRENT; elf.e_ident[EI_OSABI] = ELF_OSABI; memset(elf.e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD); elf.e_type = ET_CORE; elf.e_machine = ELF_ARCH; elf.e_version = EV_CURRENT; elf.e_entry = 0; elf.e_phoff = sizeof(elf); elf.e_shoff = 0; elf.e_flags = 0; elf.e_ehsize = sizeof(elf); elf.e_phentsize = sizeof(struct elf_phdr); elf.e_phnum = segs+1; /* Include notes. */ elf.e_shentsize = 0; elf.e_shnum = 0; elf.e_shstrndx = 0; fs = get_fs(); set_fs(KERNEL_DS); has_dumped = 1; current->flags |= PF_DUMPCORE; DUMP_WRITE(&elf, sizeof(elf)); offset += sizeof(elf); /* Elf header. */ offset += (segs+1) * sizeof(struct elf_phdr); /* Program headers. */ /* Set up the notes in similar form to SVR4 core dumps made * with info from their /proc. 
*/ memset(&psinfo, 0, sizeof(psinfo)); memset(&prstatus, 0, sizeof(prstatus)); notes[0].name = "CORE"; notes[0].type = NT_PRSTATUS; notes[0].datasz = sizeof(prstatus); notes[0].data = &prstatus; prstatus.pr_info.si_signo = prstatus.pr_cursig = signr; prstatus.pr_sigpend = current->pending.signal.sig[0]; prstatus.pr_sighold = current->blocked.sig[0]; psinfo.pr_pid = prstatus.pr_pid = task_pid_vnr(current); psinfo.pr_ppid = prstatus.pr_ppid = task_pid_vnr(current->parent); psinfo.pr_pgrp = prstatus.pr_pgrp = task_pgrp_vnr(current); psinfo.pr_sid = prstatus.pr_sid = task_session_vnr(current); if (thread_group_leader(current)) { /* * This is the record for the group leader. Add in the * cumulative times of previous dead threads. This total * won't include the time of each live thread whose state * is included in the core dump. The final total reported * to our parent process when it calls wait4 will include * those sums as well as the little bit more time it takes * this and each other thread to finish dying after the * core dump synchronization phase. */ jiffies_to_timeval(current->utime + current->signal->utime, &prstatus.pr_utime); jiffies_to_timeval(current->stime + current->signal->stime, &prstatus.pr_stime); } else { jiffies_to_timeval(current->utime, &prstatus.pr_utime); jiffies_to_timeval(current->stime, &prstatus.pr_stime); } jiffies_to_timeval(current->signal->cutime, &prstatus.pr_cutime); jiffies_to_timeval(current->signal->cstime, &prstatus.pr_cstime); if (sizeof(elf_gregset_t) != sizeof(struct pt_regs)) { printk("sizeof(elf_gregset_t) (%d) != sizeof(struct pt_regs) " "(%d)\n", sizeof(elf_gregset_t), sizeof(struct pt_regs)); } else { *(struct pt_regs *)&prstatus.pr_reg = *regs; } notes[1].name = "CORE"; notes[1].type = NT_PRPSINFO; notes[1].datasz = sizeof(psinfo); notes[1].data = &psinfo; i = current->state ? ffz(~current->state) + 1 : 0; psinfo.pr_state = i; psinfo.pr_sname = (i < 0 || i > 5) ? '.' 
: "RSDZTD"[i]; psinfo.pr_zomb = psinfo.pr_sname == 'Z'; psinfo.pr_nice = task_nice(current); psinfo.pr_flag = current->flags; psinfo.pr_uid = current->uid; psinfo.pr_gid = current->gid; { int i, len; set_fs(fs); len = current->mm->arg_end - current->mm->arg_start; len = len >= ELF_PRARGSZ ? ELF_PRARGSZ : len; (void *) copy_from_user(&psinfo.pr_psargs, (const char __user *)current->mm->arg_start, len); for (i = 0; i < len; i++) if (psinfo.pr_psargs[i] == 0) psinfo.pr_psargs[i] = ' '; psinfo.pr_psargs[len] = 0; set_fs(KERNEL_DS); } strlcpy(psinfo.pr_fname, current->comm, sizeof(psinfo.pr_fname)); /* Try to dump the FPU. */ prstatus.pr_fpvalid = dump_fpu(regs, &fpu); if (!prstatus.pr_fpvalid) { numnote--; } else { notes[2].name = "CORE"; notes[2].type = NT_PRFPREG; notes[2].datasz = sizeof(fpu); notes[2].data = &fpu; } /* Write notes phdr entry. */ { struct elf_phdr phdr; int sz = 0; for (i = 0; i < numnote; i++) sz += notesize(¬es[i]); phdr.p_type = PT_NOTE; phdr.p_offset = offset; phdr.p_vaddr = 0; phdr.p_paddr = 0; phdr.p_filesz = sz; phdr.p_memsz = 0; phdr.p_flags = 0; phdr.p_align = 0; offset += phdr.p_filesz; DUMP_WRITE(&phdr, sizeof(phdr)); } /* Page-align dumped data. */ dataoff = offset = roundup(offset, PAGE_SIZE); /* Write program headers for segments dump. */ for (vma = current->mm->mmap, i = 0; i < segs && vma != NULL; vma = vma->vm_next) { struct elf_phdr phdr; size_t sz; i++; sz = vma->vm_end - vma->vm_start; phdr.p_type = PT_LOAD; phdr.p_offset = offset; phdr.p_vaddr = vma->vm_start; phdr.p_paddr = 0; phdr.p_filesz = maydump(vma) ? sz : 0; phdr.p_memsz = sz; offset += phdr.p_filesz; phdr.p_flags = vma->vm_flags & VM_READ ? 
PF_R : 0; if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W; if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X; phdr.p_align = PAGE_SIZE; DUMP_WRITE(&phdr, sizeof(phdr)); } for (i = 0; i < numnote; i++) if (!writenote(¬es[i], file)) goto end_coredump; set_fs(fs); DUMP_SEEK(dataoff); for (i = 0, vma = current->mm->mmap; i < segs && vma != NULL; vma = vma->vm_next) { unsigned long addr = vma->vm_start; unsigned long len = vma->vm_end - vma->vm_start; if (!maydump(vma)) continue; i++; pr_debug("elf_core_dump: writing %08lx %lx\n", addr, len); DUMP_WRITE((void __user *)addr, len); } if ((off_t) file->f_pos != offset) { /* Sanity check. */ printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n", (off_t) file->f_pos, offset); } end_coredump: set_fs(fs); return has_dumped; }
/*
 * do_test - send num_msgs messages of msg_len bytes over the kfabric
 * endpoint in batches of post_depth, reap the send completions, and
 * report the achieved throughput.
 *
 * The send buffer and its memory registration are allocated on first
 * call and cached in ctx; if registration fails, ctx.buf is poisoned
 * with ERR_PTR so later calls become no-ops.  Completion reaping is
 * either blocking (SREAD) or a busy-poll with periodic reschedule.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EINTR when the
 * hosting kthread is asked to stop, or a negative kfi_* error.
 */
int do_test(void)
{
	struct kfi_cq_msg_entry comp;
	int len = msg_len * post_depth;
	int msg_cnt = num_msgs;
	int tx_bufs_sent = 0;
	int ret;
	char *mp;
	u64 time_elap;
#if SREAD == 0
	int eagain_cnt = EAGAIN_TRIES;
#endif

	if (!ctx.buf) {
		/* first call: allocate and register the send buffer */
		ctx.buf = kmalloc(len, GFP_KERNEL);
		if (!ctx.buf) {
			print_err("kalloc failed!\n");
			return -ENOMEM;
		}

		ret = kfi_mr_reg(ctx.domain, ctx.buf, len, 0, 0, 0, 0,
				 &ctx.mr, NULL, NULL);
		if (ret) {
			print_err("kfi_mr_reg returned %d\n", ret);
			kfree(ctx.buf);
			/* poison so subsequent calls bail out quietly */
			ctx.buf = ERR_PTR(-EFAULT);
			return ret;
		}
	} else if (IS_ERR(ctx.buf))
		return 0;

	print_msg("post_depth %d num_msgs %d msg_len %d SREAD[%d]\n",
		  post_depth, num_msgs, msg_len, SREAD);

	print_dbg("ctx.buf %p '%s' len %ld msg_len %d\n",
		  ctx.buf, ctx.buf, strlen(ctx.buf)+1, msg_len);

	time_elap = get_jiffies_64();

	for (mp = ctx.buf; msg_cnt > 0 && !kthread_should_stop(); ) {
		int post_cnt, cnt;

		/* post up to post_depth sends, one msg_len slot each */
		post_cnt = (msg_cnt > post_depth ? post_depth : msg_cnt);

		for (cnt = 0, mp = ctx.buf; cnt < post_cnt;
		     cnt++, mp += msg_len) {
			if (verify) {
				sprintf(mp, TEST_MESSAGE, tx_bufs_sent);
				tx_bufs_sent++;
			}
			ret = kfi_send(ctx.ep, mp, msg_len,
				       kfi_mr_desc(ctx.mr), 0, mp);
			if (ret) {
				print_err("kfi_send returned %d '%s'\n",
					  ret, kfi_strerror(ret));
				return ret;
			}
			if (kthread_should_stop())
				return -EINTR;
		}

		/* reap completions */
		for (cnt = 0; cnt < post_cnt; cnt++) {
#if SREAD
			ret = kfi_cq_sread(ctx.scq, &comp, 1, 0, TIMEOUT);
			if (ret == -ETIMEDOUT) {
				print_msg("%s(ETIMEDOUT) cnt %d post_cnt %d "
					  "msg_cnt %d\n", "kfi_cq_sread", cnt,
					  post_cnt, msg_cnt);
			}
			if (kthread_should_stop())
				return -EINTR;
#else
			/* busy-poll; yield the CPU every EAGAIN_TRIES spins */
			do {
				ret = kfi_cq_read(ctx.scq, &comp, 1);
				if (ret == 0 || ret == -EAGAIN) {
					if (--eagain_cnt <= 0) {
						dprint(DEBUG_HIGH,
						       "%s(resched %d) cnt "
						       "%d post_cnt %d\n",
						       "kfi_cq_read", ret,
						       cnt, post_cnt);
						eagain_cnt = EAGAIN_TRIES;
						schedule();
					}
				}
				if (kthread_should_stop())
					return -EINTR;
			} while (ret == 0 || ret == -EAGAIN);
#endif
			if (ret < 0) {
				struct kfi_cq_err_entry cqe = { 0 };
				int rc;

				rc = kfi_cq_readerr(ctx.scq, &cqe, 0);
				print_err("kfi_cq_read returned %d '%s'\n",
					  ret, kfi_strerror(ret));
				if (rc) {
					char buf[64];

					print_err("kfi_cq_readerr() err '%s'(%d)"
						  "\n", kfi_strerror(cqe.err),
						  cqe.err);
					print_err("kfi_cq_readerr() prov_err "
						  "'%s'(%d)\n",
						  kfi_cq_strerror(ctx.scq,
								  cqe.prov_errno,
								  cqe.err_data,
								  buf,
								  sizeof(buf)),
						  cqe.prov_errno);
				}
				return ret;
			}
			if (!ret)
				print_err("kfi_cq_sread no completion? ret %d\n",
					  ret);
#if 0
			if ((char *)comp.op_context < (char *)ctx.buf ||
			    (char *)comp.op_context >=
			    (char *) &ctx.buf[msg_len*post_depth]) {
				print_err("cq.op_context(%p) not in range "
					  "[ctx.buf(%p) ... &ctx.buf[%d](%p)]\n",
					  (void *)comp.op_context,
					  (void *)ctx.buf, msg_len,
					  (void *)&ctx.buf[msg_len]);
			}
#endif
			if (verify)
				print_msg("Tx '%s'\n",
					  (char *) comp.op_context);
		}
		msg_cnt -= post_cnt;
	}

	time_elap = get_jiffies_64() - time_elap;

#define AGIG (1024UL*1024UL*1024UL)
#define AMEG (1024UL*1024UL)
#define AKILO (1024UL)
	/* compute and report throughput in K/M/G bytes per second */
	{
		struct timeval tv;
		ulong rate, rate_mod, bytes, units_of;
		char units;

		jiffies_to_timeval(time_elap, &tv);
		bytes = (ulong) num_msgs * (ulong) msg_len;
		if (bytes >= AKILO && tv.tv_sec > 0) {
			rate = bytes / tv.tv_sec;
			rate_mod = bytes % tv.tv_sec;
			if (rate >= AGIG) {
				units = 'G';
				units_of = AGIG;
			} else if (rate >= AMEG) {
				units = 'M';
				units_of = AMEG;
			} else {
				units = 'K';
				units_of = AKILO;
			}
			rate /= units_of;
		} else {
			/* too little data or sub-second run: no rate */
			rate = rate_mod = 0UL;
			units = ' ';
			units_of = 1UL;
		}
		print_info("Tx %d msgs (%lu.%lu%cB) @ ~%lu.%lu %cB/sec (%ld sec %ld "
			   "usec)\n", num_msgs, (bytes/units_of),
			   (bytes % units_of), units, rate, rate_mod, units,
			   tv.tv_sec, tv.tv_usec);
	}
	return 0;
}
/*
 * ccnl_get_timeval - report the "current time" as a timeval derived from
 * the jiffies counter (i.e. time since boot at HZ resolution, not wall
 * clock time).
 */
inline void ccnl_get_timeval(struct timeval *tv)
{
	jiffies_to_timeval(jiffies, tv);
}
/*
 * lnet_sock_read - read exactly @nob bytes from @sock within @timeout
 * seconds.
 *
 * Returns 0 on success, -ECONNRESET on EOF, -ETIMEDOUT when the time
 * budget is exhausted, or a negative errno from the socket layer.
 */
int lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout)
{
	int rc;
	long jiffies_left = timeout * msecs_to_jiffies(MSEC_PER_SEC);
	unsigned long then;
	struct timeval tv;

	LASSERT(nob > 0);
	/* unlike lnet_sock_write(), a zero timeout is not allowed here */
	LASSERT(jiffies_left > 0);

	for (;;) {
		struct kvec iov = {
			.iov_base = buffer,
			.iov_len = nob
		};
		struct msghdr msg = {
			.msg_flags = 0
		};

		/* Set receive timeout to remaining time */
		jiffies_to_timeval(jiffies_left, &tv);
		rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
				       (char *)&tv, sizeof(tv));
		if (rc) {
			CERROR("Can't set socket recv timeout %ld.%06d: %d\n",
			       (long)tv.tv_sec, (int)tv.tv_usec, rc);
			return rc;
		}

		then = jiffies;
		rc = kernel_recvmsg(sock, &msg, &iov, 1, nob, 0);
		/* charge the elapsed time against the remaining budget */
		jiffies_left -= jiffies - then;

		if (rc < 0)
			return rc;

		if (!rc)
			return -ECONNRESET;

		/* advance past the partial read and loop for the rest */
		buffer = ((char *)buffer) + rc;
		nob -= rc;
		if (!nob)
			return 0;

		if (jiffies_left <= 0)
			return -ETIMEDOUT;
	}
}
EXPORT_SYMBOL(lnet_sock_read);

/*
 * lnet_sock_create - create a TCP socket, optionally bound to
 * @local_ip/@local_port.
 *
 * *@fatal is set for every failure except EADDRINUSE on bind, so the
 * caller can distinguish "retry with another port" from a hard error.
 */
static int
lnet_sock_create(struct socket **sockp, int *fatal, __u32 local_ip,
		 int local_port)
{
	struct sockaddr_in locaddr;
	struct socket *sock;
	int rc;
	int option;

	/* All errors are fatal except bind failure if the port is in use */
	*fatal = 1;

	rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock);
	*sockp = sock;
	if (rc) {
		CERROR("Can't create socket: %d\n", rc);
		return rc;
	}

	option = 1;
	rc = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
			       (char *)&option, sizeof(option));
	if (rc) {
		CERROR("Can't set SO_REUSEADDR for socket: %d\n", rc);
		goto failed;
	}

	if (local_ip || local_port) {
		memset(&locaddr, 0, sizeof(locaddr));
		locaddr.sin_family = AF_INET;
		locaddr.sin_port = htons(local_port);
		if (!local_ip)
			locaddr.sin_addr.s_addr = htonl(INADDR_ANY);
		else
			locaddr.sin_addr.s_addr = htonl(local_ip);

		rc = kernel_bind(sock, (struct sockaddr *)&locaddr,
				 sizeof(locaddr));
		if (rc == -EADDRINUSE) {
			CDEBUG(D_NET, "Port %d already in use\n", local_port);
			*fatal = 0;	/* the one non-fatal failure */
			goto failed;
		}
		if (rc) {
			CERROR("Error trying to bind to port %d: %d\n",
			       local_port, rc);
			goto failed;
		}
	}
	return 0;

failed:
	sock_release(sock);
	return rc;
}

/*
 * lnet_sock_setbuf - set the socket's send/receive buffer sizes.
 * A zero size leaves the corresponding buffer untouched.
 */
int
lnet_sock_setbuf(struct socket *sock, int txbufsize, int rxbufsize)
{
	int option;
	int rc;

	if (txbufsize) {
		option = txbufsize;
		rc = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
				       (char *)&option, sizeof(option));
		if (rc) {
			CERROR("Can't set send buffer %d: %d\n",
			       option, rc);
			return rc;
		}
	}

	if (rxbufsize) {
		option = rxbufsize;
		rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
				       (char *)&option, sizeof(option));
		if (rc) {
			CERROR("Can't set receive buffer %d: %d\n",
			       option, rc);
			return rc;
		}
	}
	return 0;
}
/*
 * validate_lbr - validate the current LBR (last-branch-record) state.
 *
 * With ARMOR_JIT enabled: snapshot the LBR, check its hash against a
 * small circular cache of previously-approved states, and on a miss hand
 * the snapshot to the external jit-analyzer thread through the
 * jit_work_cond / jit_result_cond semaphore pair, waiting up to
 * jit_waittime.  A timeout permanently disables JIT validation.
 * Without ARMOR_JIT: just snapshot and dump the LBR.
 */
void validate_lbr(void)
{
#ifdef ARMOR_JIT
	uint8_t hash[DIGEST_LENGTH];
	int i;
	struct lbr_t lbr;
	struct timeval time;
	unsigned long jit_start_j, jit_stop_j, jit_delta_j;

	printk("total validate lbr state %ld\n", total_count++);

	/* snapshot the LBR on this CPU (preemption disabled while reading) */
	get_cpu();
	get_lbr(&lbr);
	dump_lbr(&lbr);
	put_cpu();

	if (disable_jit) {
		printk("[validation] -- WARNING -- JIT disabled!\n");
		return;
	}

	printdj(true, "[validation] JIT: Acquiring lock...\n");
	mutex_lock(&validation_lock);
	printdj(true, "[validation] JIT: Lookup\n");
	ARMOR_STAT_INC(jit_lookups);

	/* Compute a hash of the lbr and look it up in the cache. */
	hash_lbr(hash, &lbr);
	for (i = 0; i < jit_cache.hashes; i++) {
		if (memcmp(jit_cache.hash[i], hash, DIGEST_LENGTH) == 0) {
			ARMOR_STAT_INC(jit_cache_hits);
#ifdef ARMOR_DEBUG_JIT
			printk("[validation] LBR state is valid (found in JIT cache)\n");
#endif
			mutex_unlock(&validation_lock);
			return;
		}
	}

	/* Not found in cache. Let's ask Dennis. Using Enes' semaphore design. */
	ARMOR_STAT_INC(jit_requests);
	jit_work = &lbr;
	printdj(true, "[validation] JIT: Request\n");

	/* Start the timers. */
	jit_start_j = jiffies;

	up(&jit_work_cond);
	printdj(true, "[validation] JIT: Waiting\n");
	if (down_timeout(&jit_result_cond, jit_waittime) < 0) {
		/* analyzer did not answer: turn JIT validation off for good */
		printk("[validation] JIT: Timeout\n");
		ARMOR_STAT_INC(jit_timeouts);
		disable_jit = 1;
		mutex_unlock(&validation_lock);
		return;
	}

	/* Stop the timers. */
	jit_stop_j = jiffies;
	jit_delta_j = jit_stop_j - jit_start_j;

	/* JIT may be faster than we can measure jiffies. If this happens,
	 * assume a half jiffie was used.
	 * http://stackoverflow.com/questions/10392735/am-i-too-fast-to-count-jiffies
	 */
	if (jit_delta_j == 0)
		jit_delta_j = stats.jit_lookups % 2;
	printdj(true, "That took us %lu jiffies\n", jit_delta_j);

	jiffies_to_timeval(jit_delta_j, &time);
	ARMOR_STAT_ADD(jit_sec, time.tv_sec);
	ARMOR_STAT_ADD(jit_usec, time.tv_usec);

	printdj(true, "[validation] JIT: Processing result\n");
	/* jit_result: 0 = invalid, 1 = valid, 2 = uninstrumentable */
	if (jit_result == 0) {
		printk("[validation] -- WARNING -- LBR state rendered *INVALID* by jit-analyzer\n");
		ARMOR_STAT_INC(jit_misses);
		/* kill_pid(task_pid(current), SIGKILL, 1); */
		printk("[validation] -- WARNING -- ASSUMING VALID\n");
		goto assume_valid;
	}
	if (jit_result == 2) {
		printk("[validation] -- WARNING -- LBR state not validated due to uninstrumentable function\n");
		ARMOR_STAT_INC(jit_unsupported);
		printk("[validation] -- WARNING -- ASSUMING VALID\n");
		goto assume_valid;
	}
	if (jit_result == 1) {
		ARMOR_STAT_INC(jit_hits);

assume_valid:
		/* Dennis' says it is ok. Let's add it to the - circular -
		 * cache so he can take some time off next time. */
		/* TODO, this should probably be a sorted linked list so
		 * that we can do a binary search? */
		memcpy(jit_cache.hash[jit_cache.hashes], hash, DIGEST_LENGTH);
		jit_cache.hashes = (jit_cache.hashes + 1) % JIT_CACHE_SIZE;
#ifdef ARMOR_DEBUG_JIT
		printk("[validation] LBR state is valid\n");
#endif
	}

	mutex_unlock(&validation_lock);
	return;
#else
	/* JIT validation compiled out: snapshot and dump only */
	struct lbr_t lbr;

	get_cpu();
	get_lbr(&lbr);
	dump_lbr(&lbr);
	put_cpu();

	return;
#endif /* ARMOR_JIT */
}
/*
 * hello_init - module smoke test: convert 1000 jiffies into a timeval
 * and log the result.
 *
 * Fix: struct timeval's tv_sec and tv_usec are long, so they must be
 * printed with %ld — the original "%d:%d" was a format-specifier
 * mismatch (garbage output on 64-bit builds).
 *
 * NOTE(review): if this is registered via module_init(), the init
 * function conventionally returns int (0 on success) — confirm against
 * the registration site, which is not visible in this chunk.
 */
static void hello_init(void)
{
	struct timeval val;

	printk(KERN_ALERT "enter hello world\n");
	jiffies_to_timeval(1000, &val);
	printk(KERN_ALERT "%ld:%ld\n", val.tv_sec, val.tv_usec);
}
/*
 * k_getrusage - gather rusage statistics for @p into @r.
 *
 * RUSAGE_CHILDREN reports only already-reaped children.  RUSAGE_SELF
 * sums the whole live thread group.  RUSAGE_BOTH reports the children
 * totals plus the thread group (it falls through into the shared
 * sum_group code that RUSAGE_SELF jumps to).  All signal-struct reads
 * happen under p->sighand->siglock.
 */
void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	unsigned long utime, stime;

	memset((char *) r, 0, sizeof *r);

	/* task is exiting and has dropped its signal struct: report zeros */
	if (unlikely(!p->signal))
		return;

	switch (who) {
	case RUSAGE_CHILDREN:
		spin_lock_irqsave(&p->sighand->siglock, flags);
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;
		r->ru_inblock = p->signal->inblock;
		r->ru_oublock = p->signal->oublock;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
		jiffies_to_timeval(utime, &r->ru_utime);
		jiffies_to_timeval(stime, &r->ru_stime);
		break;
	case RUSAGE_SELF:
		spin_lock_irqsave(&p->sighand->siglock, flags);
		utime = stime = 0;
		goto sum_group;	/* skip the children totals */
	case RUSAGE_BOTH:
		spin_lock_irqsave(&p->sighand->siglock, flags);
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;
		r->ru_inblock = p->signal->inblock;
		r->ru_oublock = p->signal->oublock;
	sum_group:
		/* counters accumulated from already-dead group members */
		utime += p->signal->utime;
		stime += p->signal->stime;
		r->ru_nvcsw += p->signal->nvcsw;
		r->ru_nivcsw += p->signal->nivcsw;
		r->ru_minflt += p->signal->min_flt;
		r->ru_majflt += p->signal->maj_flt;
		r->ru_inblock += p->signal->inblock;
		r->ru_oublock += p->signal->oublock;
		/* then walk every live thread in the group */
		t = p;
		do {
			utime += t->utime;
			stime += t->stime;
			r->ru_nvcsw += t->nvcsw;
			r->ru_nivcsw += t->nivcsw;
			r->ru_minflt += t->min_flt;
			r->ru_majflt += t->maj_flt;
			r->ru_inblock += task_io_get_inblock(t);
			r->ru_oublock += task_io_get_oublock(t);
			t = next_thread(t);
		} while (t != p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
		jiffies_to_timeval(utime, &r->ru_utime);
		jiffies_to_timeval(stime, &r->ru_stime);
		break;
	default:
		BUG();
	}
}
/*
 * mdp_isr - MDP interrupt handler.
 *
 * Reads and acknowledges the interrupt status in a loop until no masked
 * source remains asserted, dispatching each source: counting underruns,
 * disabling one-shot sources (TV-out start, LCDC frame start), and
 * completing the waiters armed by mdp_pipe_kickoff() for the DMA and
 * PPP done interrupts.
 */
irqreturn_t mdp_isr(int irq, void *ptr)
{
	uint32 mdp_interrupt = 0;
	struct mdp_dma_data *dma;

	mdp_is_in_isr = TRUE;
	do {
		/* read-and-ack, then reduce to the sources we care about */
		mdp_interrupt = inp32(MDP_INTR_STATUS);
		outp32(MDP_INTR_CLEAR, mdp_interrupt);

		mdp_interrupt &= mdp_intr_mask;

		if (mdp_interrupt & TV_ENC_UNDERRUN) {
			mdp_interrupt &= ~(TV_ENC_UNDERRUN);
			mdp_tv_underflow_cnt++;
		}

		if (!mdp_interrupt)
			break;

		/* DMA3 TV-Out Start */
		if (mdp_interrupt & TV_OUT_DMA3_START) {
			/* let's disable TV out interrupt */
			mdp_intr_mask &= ~TV_OUT_DMA3_START;
			outp32(MDP_INTR_ENABLE, mdp_intr_mask);

			dma = &dma3_data;
			if (dma->waiting) {
				dma->waiting = FALSE;
				complete(&dma->comp);
			}
		}
#ifndef CONFIG_FB_MSM_MDP22
		/* LCDC UnderFlow */
		if (mdp_interrupt & LCDC_UNDERFLOW) {
			mdp_lcdc_underflow_cnt++;
		}
		/* LCDC Frame Start */
		if (mdp_interrupt & LCDC_FRAME_START) {
			/* let's disable LCDC interrupt */
			mdp_intr_mask &= ~LCDC_FRAME_START;
			outp32(MDP_INTR_ENABLE, mdp_intr_mask);

			dma = &dma2_data;
			if (dma->waiting) {
				dma->waiting = FALSE;
				complete(&dma->comp);
			}
		}

		/* DMA_S LCD-Out Complete (banner corrected: this branch
		 * handles MDP_DMA_S_DONE, not DMA2) */
		if (mdp_interrupt & MDP_DMA_S_DONE) {
			dma = &dma_s_data;
			dma->busy = FALSE;
			mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_OFF,
				      TRUE);
			complete(&dma->comp);
		}
#endif

		/* DMA2 LCD-Out Complete */
		if (mdp_interrupt & MDP_DMA_P_DONE) {
			struct timeval now;
			ktime_t now_k;

			now_k = ktime_get_real();
			/* NOTE(review): stores the delta since kickoff back
			 * into mdp_dma2_last_update_time in place */
			mdp_dma2_last_update_time.tv.sec =
			    now_k.tv.sec - mdp_dma2_last_update_time.tv.sec;
			mdp_dma2_last_update_time.tv.nsec =
			    now_k.tv.nsec - mdp_dma2_last_update_time.tv.nsec;

			if (mdp_debug[MDP_DMA2_BLOCK]) {
				jiffies_to_timeval(jiffies, &now);
				mdp_dma2_timeval.tv_usec =
				    now.tv_usec - mdp_dma2_timeval.tv_usec;
			}

			dma = &dma2_data;
			dma->busy = FALSE;
			mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_OFF,
				      TRUE);
			complete(&dma->comp);
		}
		/* PPP Complete */
		if (mdp_interrupt & MDP_PPP_DONE) {
			mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_OFF,
				      TRUE);
			if (mdp_ppp_waiting) {
				mdp_ppp_waiting = FALSE;
				complete(&mdp_ppp_comp);
			}
		}
	} while (1);

	mdp_is_in_isr = FALSE;

	return IRQ_HANDLED;
}