/*
 * Size (or clear) the hash part of table 't' so it can hold 'size'
 * entries.  A request of zero shares the read-only 'dummynode'
 * instead of allocating.  Raises a runtime error when the rounded
 * size would not fit in the log2-encoded 'lsizenode' field.
 */
static void setnodevector(ktap_State *ks, Table *t, int size)
{
	int lsize = 0;

	if (size == 0) {
		/* no elements in hash part: share common 'dummynode' */
		t->node = (Node *)dummynode;
	} else {
		int i;

		lsize = ceillog2(size);
		if (lsize > MAXBITS)
			kp_runerror(ks, "table overflow");

		/* round up to the next power of two */
		size = twoto(lsize);
		t->node = kp_malloc(ks, size * sizeof(Node));

		/* start with every slot empty and unchained */
		for (i = 0; i < size; i++) {
			Node *slot = gnode(t, i);

			gnext(slot) = NULL;
			setnilvalue(gkey(slot));
			setnilvalue(gval(slot));
		}
	}

	t->lsizenode = (u8)lsize;
	t->lastfree = gnode(t, size);	/* all positions are free */
}
/*
 * Grow the callinfo chain by one node: allocate a fresh
 * ktap_callinfo, link it after the current ks->ci, and return it.
 */
static ktap_callinfo *extend_ci(ktap_state *ks)
{
	ktap_callinfo *new_ci = kp_malloc(ks, sizeof(ktap_callinfo));

	ks->ci->next = new_ci;
	new_ci->prev = ks->ci;
	new_ci->next = NULL;
	return new_ci;
}
/* need to protect allgc field? */
/*
 * Allocate a garbage-collected object of 'size' bytes, tag its
 * header with 'type', and push it onto the front of '*list'.
 * When 'list' is NULL the global allgc list is used.
 */
ktap_gcobject *kp_obj_newobject(ktap_state *ks, int type, size_t size,
				ktap_gcobject **list)
{
	ktap_gcobject *obj = kp_malloc(ks, size);

	if (!list)
		list = &G(ks)->allgc;

	gch(obj)->tt = type;
	gch(obj)->next = *list;
	*list = obj;
	return obj;
}
/* need to protect allgc field? */
/*
 * Allocate a GC object carrying 'size' bytes of payload after the
 * Gcobject header, tag it with 'type', clear its GC mark, and push
 * it onto the front of '*list' (the global allgc list when 'list'
 * is NULL).
 */
Gcobject *kp_newobject(ktap_State *ks, int type, size_t size, Gcobject **list)
{
	Gcobject *obj = kp_malloc(ks, sizeof(Gcobject) + size);

	if (!list)
		list = &G(ks)->allgc;

	gch(obj)->tt = type;
	gch(obj)->marked = 0;
	gch(obj)->next = *list;
	*list = obj;
	return obj;
}
/* need to protect allgc field? */
/*
 * Allocate a 'size'-byte GC object on the global allgc list.
 * Restricted to the main thread; returns NULL (with an error
 * report) when called elsewhere, or NULL on allocation failure.
 */
ktap_obj_t *kp_obj_new(ktap_state_t *ks, size_t size)
{
	ktap_obj_t **head;
	ktap_obj_t *obj;

	if (ks != G(ks)->mainthread) {
		kp_error(ks, "kp_obj_new only can be called in mainthread\n");
		return NULL;
	}

	obj = kp_malloc(ks, size);
	if (unlikely(!obj))
		return NULL;

	head = &G(ks)->allgc;
	gch(obj)->nextgc = *head;
	*head = obj;
	return obj;
}
/* histogram: key should be number or string, value must be number */
/*
 * Print an ASCII histogram of table 't': one row per entry, sorted
 * by hist_record_cmp, with an '@' bar whose length is proportional
 * to the entry's share of the total.  Entries come from both the
 * array part (numeric keys) and the hash part (number or string
 * keys).  If any value is not a number, an error line is printed
 * instead.
 *
 * Fixes vs. previous version:
 *  - array-part entries never stored their value into thr[].val,
 *    so those rows printed uninitialized memory;
 *  - division by zero when all values sum to 0;
 *  - removed the unused local 'num'.
 */
void kp_table_histogram(ktap_State *ks, Table *t)
{
	struct table_hist_record *thr;
	char dist_str[40];
	int i, ratio, total = 0, count = 0;

	thr = kp_malloc(ks, sizeof(*thr) * (t->sizearray + sizenode(t)));

	/* collect numeric-keyed entries from the array part */
	for (i = 0; i < t->sizearray; i++) {
		Tvalue *v = &t->array[i];

		if (isnil(v))
			continue;
		if (!ttisnumber(v))
			goto error;

		setnvalue(&thr[count].key, i + 1);
		/* bugfix: the value was previously never recorded,
		 * leaving thr[count].val uninitialized */
		setobj(ks, &thr[count].val, v);
		count++;
		total += nvalue(v);
	}

	/* collect entries from the hash part */
	for (i = 0; i < sizenode(t); i++) {
		Node *n = &t->node[i];

		if (isnil(gkey(n)))
			continue;
		if (!ttisnumber(gval(n)))
			goto error;

		setobj(ks, &thr[count].key, gkey(n));
		setobj(ks, &thr[count].val, gval(n));
		count++;
		total += nvalue(gval(n));
	}

	sort(thr, count, sizeof(struct table_hist_record),
	     hist_record_cmp, NULL);

	kp_printf(ks, "%32s%s%s\n", "value ", DISTRIBUTION_STR, " count");

	/* empty table or all-zero values: nothing to draw, and the
	 * ratio computation below would divide by zero */
	if (total == 0)
		goto out;

	dist_str[sizeof(dist_str) - 1] = '\0';
	for (i = 0; i < count; i++) {
		Tvalue *key = &thr[i].key;
		Tvalue *val = &thr[i].val;

		memset(dist_str, ' ', sizeof(dist_str) - 1);
		ratio = (nvalue(val) * (sizeof(dist_str) - 1)) / total;
		memset(dist_str, '@', ratio);

		if (ttisstring(key)) {
			char buf[32 + 1] = {0};
			char *keystr;

			/* truncate long string keys to 28 chars + "..." */
			if (strlen(svalue(key)) > 32) {
				strncpy(buf, svalue(key), 32-4);
				memset(buf + 32-4, '.', 3);
				keystr = buf;
			} else
				keystr = svalue(key);

			kp_printf(ks, "%32s |%s%-10d\n", keystr,
				  dist_str, nvalue(val));
		} else
			kp_printf(ks, "%32d | %s%-10d\n", nvalue(key),
				  dist_str, nvalue(val));
	}
	goto out;

 error:
	kp_printf(ks, "error: table histogram only handle "
		      " (key: string/number val: number)\n");
 out:
	kp_free(ks, thr);
}
/*
 * Marshal the script-level arguments for C symbol-function 'csf'
 * according to (a subset of) the System V x86-64 calling convention
 * and invoke it via ffi_call_assem_x86_64().
 *
 * Layout built inside the local 'space' buffer, low to high:
 *   [red zone][GPR save area][stack-passed args][by-memory args][temp]
 *
 * 'rvalue' receives the return value.  For struct/union returns
 * larger than 16 bytes the result is returned through memory: a
 * buffer is allocated here and its address is passed as a hidden
 * first argument (which is why gpr_nr starts at 1 in that case).
 */
static void ffi_call_x86_64(ktap_state_t *ks, csymbol_func *csf, void *rvalue)
{
	int i;
	int gpr_nr;
	int arg_bytes;	/* total bytes needed for exceeded args in stack */
	int mem_bytes;	/* total bytes needed for memory storage */
	char *stack, *stack_p, *gpr_p, *arg_p, *mem_p, *tmp_p;
	int arg_nr;
	csymbol *rsym;
	ffi_type rtype;
	size_t rsize;
	bool ret_in_memory;
	/* New stack to call C function */
	char space[NEWSTACK_SIZE];

	arg_nr = kp_arg_nr(ks);
	rsym = csymf_ret(ks, csf);
	rtype = csym_type(rsym);
	rsize = csym_size(ks, rsym);

	/* decide how aggregate return values travel back */
	ret_in_memory = false;
	if (rtype == FFI_STRUCT || rtype == FFI_UNION) {
		if (rsize > 16) {
			/* returned through caller-provided memory;
			 * treat the call itself as void */
			rvalue = kp_malloc(ks, rsize);
			rtype = FFI_VOID;
			ret_in_memory = true;
		} else {
			/* much easier to always copy 16 bytes from
			 * registers */
			rvalue = kp_malloc(ks, 16);
		}
	}

	gpr_nr = 0;
	arg_bytes = mem_bytes = 0;
	if (ret_in_memory)
		gpr_nr++;	/* hidden return pointer takes one GPR */

	/*
	 * Pass 1: classify each argument (register / stack / memory)
	 * and calculate bytes needed for stack.
	 */
	for (i = 0; i < arg_nr; i++) {
		csymbol *cs = ffi_get_arg_csym(ks, csf, i);
		size_t size = csym_size(ks, cs);
		size_t align = csym_align(ks, cs);
		enum arg_status st = IN_REGISTER;
		int n_gpr_nr = 0;

		if (size > 32) {
			/* passed by reference: one GPR for the pointer */
			st = IN_MEMORY;
			n_gpr_nr = 1;
		} else if (size > 16)
			st = IN_STACK;
		else
			n_gpr_nr = ALIGN(size, GPR_SIZE) / GPR_SIZE;

		if (gpr_nr + n_gpr_nr > MAX_GPR) {
			/* out of registers: spill to the stack area */
			if (st == IN_MEMORY)
				arg_bytes += GPR_SIZE;
			else
				st = IN_STACK;
		} else
			gpr_nr += n_gpr_nr;

		if (st == IN_STACK) {
			arg_bytes = ALIGN(arg_bytes, align);
			arg_bytes += size;
			arg_bytes = ALIGN(arg_bytes, STACK_ALIGNMENT);
		}
		if (st == IN_MEMORY) {
			mem_bytes = ALIGN(mem_bytes, align);
			mem_bytes += size;
			mem_bytes = ALIGN(mem_bytes, STACK_ALIGNMENT);
		}
	}

	/* apply space to fake stack for C function call */
	if (16 + REDZONE_SIZE + MAX_GPR_SIZE + arg_bytes +
			mem_bytes + 6 * 8 >= NEWSTACK_SIZE) {
		kp_error(ks, "Unable to handle that many arguments by now\n");
		return;
	}
	stack = space;
	/* 128 bytes below %rsp is red zone */
	/* stack should be 16-bytes aligned */
	stack_p = ALIGN_STACK(stack + REDZONE_SIZE, 16);
	/* save general purpose registers here */
	gpr_p = stack_p;
	memset(gpr_p, 0, MAX_GPR_SIZE);
	/* save arguments in stack here */
	arg_p = gpr_p + MAX_GPR_SIZE;
	/* save arguments in memory here */
	mem_p = arg_p + arg_bytes;
	/* set additional space as temporary space */
	tmp_p = mem_p + mem_bytes;

	/*
	 * Pass 2: copy arguments here, repeating the same
	 * classification so both passes agree on placement.
	 */
	gpr_nr = 0;
	if (ret_in_memory) {
		/* hidden pointer to the return buffer goes first */
		memcpy(gpr_p, &rvalue, GPR_SIZE);
		gpr_p += GPR_SIZE;
		gpr_nr++;
	}
	for (i = 0; i < arg_nr; i++) {
		csymbol *cs = ffi_get_arg_csym(ks, csf, i);
		size_t size = csym_size(ks, cs);
		size_t align = csym_align(ks, cs);
		enum arg_status st = IN_REGISTER;
		int n_gpr_nr = 0;

		if (size > 32) {
			st = IN_MEMORY;
			n_gpr_nr = 1;
		} else if (size > 16)
			st = IN_STACK;
		else
			n_gpr_nr = ALIGN(size, GPR_SIZE) / GPR_SIZE;

		if (st == IN_MEMORY)
			mem_p = ALIGN_STACK(mem_p, align);

		/* Tricky way about storing it above mem_p. It won't
		 * overflow because temp region can be temporarily used
		 * if necessary. */
		ffi_unpack(ks, mem_p, csf, i, GPR_SIZE);

		if (gpr_nr + n_gpr_nr > MAX_GPR) {
			if (st == IN_MEMORY) {
				/* registers exhausted: pointer to the
				 * by-memory copy goes on the stack */
				memcpy(arg_p, &mem_p, GPR_SIZE);
				arg_p += GPR_SIZE;
			} else
				st = IN_STACK;
		} else {
			memcpy(gpr_p, mem_p, n_gpr_nr * GPR_SIZE);
			gpr_p += n_gpr_nr * GPR_SIZE;
			gpr_nr += n_gpr_nr;
		}

		if (st == IN_STACK) {
			arg_p = ALIGN_STACK(arg_p, align);
			memcpy(arg_p, mem_p, size);
			arg_p += size;
			arg_p = ALIGN_STACK(arg_p, STACK_ALIGNMENT);
		}
		if (st == IN_MEMORY) {
			/* keep the by-memory copy; just advance past it */
			mem_p += size;
			mem_p = ALIGN_STACK(mem_p, STACK_ALIGNMENT);
		}
	}

	kp_verbose_printf(ks, "Stack location: %p -redzone- %p -general purpose "
			"register used- %p -zero- %p -stack for argument- %p"
			" -memory for argument- %p -temp stack-\n",
			stack, stack_p, gpr_p, stack_p + MAX_GPR_SIZE,
			arg_p, mem_p);
	kp_verbose_printf(ks, "GPR number: %d; arg in stack: %d; "
			"arg in mem: %d\n", gpr_nr, arg_bytes, mem_bytes);
	kp_verbose_printf(ks, "Return: address %p type %d\n", rvalue, rtype);
	kp_verbose_printf(ks, "Number of register used: %d\n", gpr_nr);
	kp_verbose_printf(ks, "Start FFI call on %p\n", csf->addr);

	ffi_call_assem_x86_64(stack_p, tmp_p, csf->addr, rvalue, rtype);
}
/*
 * ktap mainthread initialization, main entry for ktap.
 *
 * Allocates the main ktap_state (with the global state placed
 * immediately after it in the same allocation), wires up transport,
 * optional pid/cpu filtering, string table, registry, arguments and
 * the built-in libraries.  On any failure it sets G(ks)->exit and
 * tears everything down through kp_final_exit(), returning NULL.
 */
ktap_state *kp_newstate(struct ktap_parm *parm, struct dentry *dir, char **argv)
{
	ktap_state *ks;
	pid_t pid;
	int cpu;

	/* one allocation holds both the state and the global state */
	ks = kzalloc(sizeof(ktap_state) + sizeof(ktap_global_state),
		     GFP_KERNEL);
	if (!ks)
		return NULL;

	/* NOTE(review): kp_malloc result is not NULL-checked here —
	 * confirm kp_malloc cannot fail or handles failure itself */
	ks->stack = kp_malloc(ks, KTAP_STACK_SIZE);
	G(ks) = (ktap_global_state *)(ks + 1);
	G(ks)->mainthread = ks;
	G(ks)->seed = 201236; /* todo: make more random in future */
	G(ks)->task = current;
	G(ks)->verbose = parm->verbose; /* for debug use */
	G(ks)->print_timestamp = parm->print_timestamp;
	G(ks)->workload = parm->workload;
	INIT_LIST_HEAD(&(G(ks)->timers));
	INIT_LIST_HEAD(&(G(ks)->probe_events_head));
	G(ks)->exit = 0;

	if (kp_transport_init(ks, dir))
		goto out;

	/* optional: restrict tracing to a single task */
	pid = (pid_t)parm->trace_pid;
	if (pid != -1) {
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(find_vpid(pid), PIDTYPE_PID);
		if (!task) {
			kp_error(ks, "cannot find pid %d\n", pid);
			rcu_read_unlock();
			goto out;
		}
		G(ks)->trace_task = task;
		/* hold a reference while tracing; released at exit */
		get_task_struct(task);
		rcu_read_unlock();
	}

	if (!alloc_cpumask_var(&G(ks)->cpumask, GFP_KERNEL))
		goto out;

	/* default: trace on every online cpu ... */
	cpumask_copy(G(ks)->cpumask, cpu_online_mask);

	/* ... unless a single cpu was requested */
	cpu = parm->trace_cpu;
	if (cpu != -1) {
		if (!cpu_online(cpu)) {
			printk(KERN_INFO "ktap: cpu %d is not online\n", cpu);
			goto out;
		}
		cpumask_clear(G(ks)->cpumask);
		cpumask_set_cpu(cpu, G(ks)->cpumask);
	}

	if (cfunction_cache_init(ks))
		goto out;

	kp_tstring_resize(ks, 512); /* set initial string hashtable size */

	ktap_init_state(ks);
	ktap_init_registry(ks);
	ktap_init_arguments(ks, parm->argc, argv);

	/* init library */
	kp_init_baselib(ks);
	kp_init_kdebuglib(ks);
	kp_init_timerlib(ks);
	kp_init_ansilib(ks);

	if (alloc_kp_percpu_data())
		goto out;

	if (kp_probe_init(ks))
		goto out;

	return ks;

 out:
	/* unified teardown: mark exiting, then free everything */
	G(ks)->exit = 1;
	kp_final_exit(ks);
	return NULL;
}