/* This must be safe from any context. It's safe writing here
 * because of the head/tail separation of the writer and reader
 * of the CPU buffer.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes
 */
static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
                      int is_kernel, unsigned long event)
{
    struct task_struct *task;

    cpu_buf->sample_received++;

    if (nr_available_slots(cpu_buf) < 3) {
        cpu_buf->sample_lost_overflow++;
        return 0;
    }

    is_kernel = !!is_kernel;

    task = current;

    /* notice a switch from user->kernel or vice versa */
    if (cpu_buf->last_is_kernel != is_kernel) {
        cpu_buf->last_is_kernel = is_kernel;
        add_code(cpu_buf, is_kernel);
    }

    /* notice a task switch */
    if (cpu_buf->last_task != task) {
        cpu_buf->last_task = task;
        add_code(cpu_buf, (unsigned long)task);
    }

    add_sample(cpu_buf, pc, event);
    return 1;
}
/* This must be safe from any context. It's safe writing here
 * because of the head/tail separation of the writer and reader
 * of the CPU buffer.
 *
 * cpu_mode is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel/user (and xen)
 * enter events whenever cpu_mode changes
 */
static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
                      int cpu_mode, unsigned long event)
{
    struct task_struct *task;

    cpu_buf->sample_received++;

    if (nr_available_slots(cpu_buf) < 3) {
        cpu_buf->sample_lost_overflow++;
        return 0;
    }

    task = current;

    /* notice a switch from user->kernel or vice versa */
    if (cpu_buf->last_cpu_mode != cpu_mode) {
        cpu_buf->last_cpu_mode = cpu_mode;
        add_code(cpu_buf, cpu_mode);
    }

    /* notice a task switch */
    /* if not processing other domain samples */
    if ((cpu_buf->last_task != task) &&
        (current_domain == COORDINATOR_DOMAIN)) {
        cpu_buf->last_task = task;
        add_code(cpu_buf, (unsigned long)task);
    }

    if (pc == IBS_FETCH_CODE || pc == IBS_OP_CODE)
        add_code(cpu_buf, cpu_mode);

    add_sample(cpu_buf, pc, event);
    return 1;
}
/*
 * Generate the prefix code for the main method:
 * parse args[0] as an int and store it in local variable 0.
 */
static void generate_main_prefix_code(CodeAttribute *ca){
    add_code(ca, ALOAD_0);   /* push the String[] args reference */
    add_code(ca, ICONST_0);  /* push index 0 */
    add_code(ca, AALOAD);    /* load args[0] */
    add_code(ca, INVOKESTATIC,
             add_constant_method_info("java/lang/Integer", "parseInt",
                                      "(Ljava/lang/String;)I"));
    add_code(ca, ISTORE_0);  /* store the parsed int in local variable 0 */
}
/* This must be safe from any context. It's safe writing here
 * because of the head/tail separation of the writer and reader
 * of the CPU buffer.
 *
 * cpu_mode is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel/user (and xen)
 * enter events whenever cpu_mode changes
 */
static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
                      int cpu_mode, unsigned long event)
{
    struct task_struct *task;

    cpu_buf->sample_received++;

    if (nr_available_slots(cpu_buf) < 3) {
        cpu_buf->sample_lost_overflow++;
        return 0;
    }

    WARN_ON(cpu_mode > CPU_MODE_XEN);

    task = current;

    /* notice a switch from user->kernel or vice versa */
    if (cpu_buf->last_cpu_mode != cpu_mode) {
        cpu_buf->last_cpu_mode = cpu_mode;
        add_code(cpu_buf, cpu_mode);
    }

    /* notice a task switch */
    if (cpu_buf->last_task != task) {
        cpu_buf->last_task = task;
        add_code(cpu_buf, (unsigned long)task);
    }

    add_sample(cpu_buf, pc, event);
    return 1;
}
/*
 * Generate code for a function call expression.
 */
static void generate_call_expression(CodeAttribute *ca, CallExpression *ce){
    if(ce->parameter_expression){
        /* evaluate the argument, then invoke the one-int-parameter form */
        generate_expression_code(ca, ce->parameter_expression);
        add_code(ca, INVOKESTATIC,
                 add_constant_method_info_with_class(ca->this_class_index,
                                                     ce->identifier, "(I)I"));
    }
    else{
        /* no argument: invoke the zero-parameter form */
        add_code(ca, INVOKESTATIC,
                 add_constant_method_info_with_class(ca->this_class_index,
                                                     ce->identifier, "()I"));
    }
}
/*
 * Generate code that pushes an int constant onto the operand stack,
 * using the shortest instruction that can encode the value.
 */
static void generate_int_expression(CodeAttribute *ca, int value){
    switch(value){
    case 0:
        add_code(ca, ICONST_0);
        return;
    case 1:
        add_code(ca, ICONST_1);
        return;
    case 2:
        add_code(ca, ICONST_2);
        return;
    case 3:
        add_code(ca, ICONST_3);
        return;
    case 4:
        add_code(ca, ICONST_4);
        return;
    case 5:
        add_code(ca, ICONST_5);
        return;
    }
    if(-128 <= value && value < 128)
        add_code(ca, BIPUSH, value);
    else if(-32768 <= value && value < 32768)
        add_code(ca, SIPUSH, value);
    else{
        // TODO ldc
    }
}
void spcs_s_add(spcs_s_info_t kstatus, spcs_s_status_t stcode, ...)
{
    va_list ap;
    spcs_s_udata_t c;
    spcs_s_pinfo_t *p;
    char *sp;

#ifdef UNISTAT_TRACE
    cmn_err(CE_WARN, "!cspcs_s_add entry");
#endif
    p = (spcs_s_pinfo_t *)kstatus;
    c.s = stcode;

    if (add_code(p, stcode) == B_TRUE) {
#ifdef UNISTAT_TRACE
        cmn_err(CE_WARN, "!cspcs_s_add exit 1");
#endif
        return;
    }

    va_start(ap, stcode);
    while (c.f.sup_count--) {
        sp = va_arg(ap, caddr_t);
        if (sp != (char *)NULL)
            add_item(p, sp);
    }
    va_end(ap);
#ifdef UNISTAT_TRACE
    cmn_err(CE_WARN, "!cspcs_s_add exit 2");
#endif
}
void set_text(SWFMovie mo, const char* text)
{
    char buf[1024];
    /* bounded write so a long message cannot overflow buf */
    snprintf(buf, sizeof(buf), "_root.msg=\"%s\";", text);
    add_code(mo, buf);
}
// Encode LZW: source, destination and data size. Returns the size of the compressed data.
long lzw_encode(unsigned char *source, void *target, int size)
{
    long bitpos = 0;
    DOUBLE_S p;
    int f;

clear:
    old_value = p.group = *source++;
    size--;
    while (size-- > 0) {
        p.chr = (int)((unsigned char)(*source++));
        old_value += p.chr;
        f = find_code(&p);
        if (f < 0) {
            bitpos = output_code_c(target, bitpos, bitsize, p.group);
            add_code(&p);
            if (nextgroup == (1 << bitsize))
                bitsize++;
            p.group = p.chr;
            if (nextgroup >= LZW_MAX_CODES) {
                bitpos = output_code_c(target, bitpos, bitsize, p.group);
                bitpos = output_code_c(target, bitpos, bitsize, clear_code);
                do_clear_code();
                goto clear;
            }
        }
        else
            p.group = f;
    }
    bitpos = output_code_c(target, bitpos, bitsize, p.group);
    bitpos = output_code_c(target, bitpos, bitsize, end_code);
    return (bitpos + 8) >> 3;
}
intercodes link(intercodes l1, intercodes l2)
{
    intercodes tmp;
    if (l1 == NULL && l2 == NULL)
        return NULL;
    if (l1 == NULL) {
        add_code(l2);
        return l2;
    }
    if (l2 == NULL) {
        add_code(l1);
        return l1;
    }
    tmp = l1;
    tmp->next = l2;
    l2->prev = tmp;
    return l1;
}
long lzw_encode(char *source, void *target, int size)
{
    long bitpos = 0;
    DOUBLE_S p;
    int f;

clear:
    old_value = p.group = *source++;
    size--;
    while (size-- > 0) {
        p.chr = (int)((char)(*source++ - old_value));
        old_value += p.chr;
        f = find_code(&p);
        if (f < 0) {
            bitpos = output_code(target, bitpos, bitsize, p.group);
            add_code(&p);
            if (nextgroup == (1 << bitsize))
                bitsize++;
            p.group = p.chr;
            if (nextgroup >= LZW_MAX_CODES) {
                bitpos = output_code(target, bitpos, bitsize, p.group);
                bitpos = output_code(target, bitpos, bitsize, clear_code);
                do_clear_code();
                goto clear;
            }
        }
        else
            p.group = f;
    }
    bitpos = output_code(target, bitpos, bitsize, p.group);
    bitpos = output_code(target, bitpos, bitsize, end_code);
    return (bitpos + 8) >> 3;
}
void init()
{
    /* Letter groups: vowels and H/W/Y map to -1, the empty group to 0,
     * then BFPV=1, CGJKQSXZ=2, DT=3, L=4, MN=5, R=6 (Soundex codes). */
    static const char *cls[] = {
        "AEIOUHWY", "", "BFPV", "CGJKQSXZ", "DT", "L", "MN", "R", 0
    };
    int i;
    for (i = 0; cls[i]; i++)
        add_code(cls[i], i - 1);
}
static void generate_function_pattern_code(CodeAttribute *ca, FunctionPattern *fp){
    Code *jump_operator;
    u2 start_pc;

    add_code(ca, ILOAD_0);  /* push the argument (local variable 0) */
    if(fp->pattern){
        /* compare the argument against the pattern constant */
        generate_int_expression(ca, fp->pattern);
        start_pc = ca->code_length;
        jump_operator = add_code(ca, IF_ICOMPNE, 0);
    }
    else{
        /* pattern is 0: branch past the body if the argument is non-zero */
        start_pc = ca->code_length;
        jump_operator = add_code(ca, IFNE, 0);
    }
    generate_expression_code(ca, fp->statement->expression);
    add_code(ca, IRETURN);
    /* backpatch the conditional jump to skip this pattern's body */
    jump_operator->next->u.operand_short = ca->code_length - start_pc;
}
intercodes gen_assign(kind2 kind, operand left, operand right)
{
    intercodes ic = (intercodes)malloc(sizeof(struct InterCodes));
    ic->prev = ic->next = NULL;  /* initialize links, as in the other gen_* helpers */
    ic->code = (intercode)malloc(sizeof(struct InterCode));
    ic->code->kind = kind;
    ic->code->u.assign.left = left;
    ic->code->u.assign.right = right;
    add_code(ic);
    return ic;
}
void oprofile_add_ibs_sample(struct pt_regs *const regs,
                             unsigned int *const ibs_sample, int ibs_code)
{
    int is_kernel = !user_mode(regs);
    struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
    struct task_struct *task;

    cpu_buf->sample_received++;

    if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) {
        /* we can't backtrace since we lost the source of this event */
        cpu_buf->sample_lost_overflow++;
        return;
    }

    /* notice a switch from user->kernel or vice versa */
    if (cpu_buf->last_is_kernel != is_kernel) {
        cpu_buf->last_is_kernel = is_kernel;
        add_code(cpu_buf, is_kernel);
    }

    /* notice a task switch */
    if (!is_kernel) {
        task = current;
        if (cpu_buf->last_task != task) {
            cpu_buf->last_task = task;
            add_code(cpu_buf, (unsigned long)task);
        }
    }

    add_code(cpu_buf, ibs_code);
    add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
    add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
    add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);

    if (ibs_code == IBS_OP_BEGIN) {
        add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]);
        add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]);
        add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
    }

    if (backtrace_depth)
        oprofile_ops.backtrace(regs, backtrace_depth);
}
intercodes gen_one(kind2 kind, operand op)
{
    intercodes ic = (intercodes)malloc(sizeof(struct InterCodes));
    ic->prev = ic->next = NULL;
    ic->code = (intercode)malloc(sizeof(struct InterCode));
    ic->code->kind = kind;
    // printf("%d\n",kind);
    ic->code->u.one.op = op;
    add_code(ic);
    return ic;
}
static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf,
                          unsigned long pc, int is_kernel,
                          unsigned int *ibs, int ibs_code)
{
    struct task_struct *task;

    cpu_buf->sample_received++;

    if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) {
        cpu_buf->sample_lost_overflow++;
        return 0;
    }

    is_kernel = !!is_kernel;

    /* notice a switch from user->kernel or vice versa */
    if (cpu_buf->last_is_kernel != is_kernel) {
        cpu_buf->last_is_kernel = is_kernel;
        add_code(cpu_buf, is_kernel);
    }

    /* notice a task switch */
    if (!is_kernel) {
        task = current;
        if (cpu_buf->last_task != task) {
            cpu_buf->last_task = task;
            add_code(cpu_buf, (unsigned long)task);
        }
    }

    add_code(cpu_buf, ibs_code);
    add_sample(cpu_buf, ibs[0], ibs[1]);
    add_sample(cpu_buf, ibs[2], ibs[3]);
    add_sample(cpu_buf, ibs[4], ibs[5]);

    if (ibs_code == IBS_OP_BEGIN) {
        add_sample(cpu_buf, ibs[6], ibs[7]);
        add_sample(cpu_buf, ibs[8], ibs[9]);
        add_sample(cpu_buf, ibs[10], ibs[11]);
    }

    return 1;
}
template< typename... Tail >
void add_code( perseus::detail::code_segment& out_code, const label_map& labels,
               const label_reference_offset& reference, Tail... tail )
{
    auto pos = labels.find( reference.name );
    if( pos == labels.end() )
    {
        throw std::logic_error( "invalid label reference" );
    }
    /* emit the label's position as an offset relative to the end of the
       32-bit placeholder being written */
    out_code.push< std::uint32_t >( pos->second - out_code.size() - sizeof( std::uint32_t ) );
    add_code( out_code, labels, tail... );
}
static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
    if (nr_available_slots(cpu_buf) < 4) {
        cpu_buf->sample_lost_overflow++;
        return 0;
    }
    add_code(cpu_buf, CPU_TRACE_BEGIN);
    cpu_buf->tracing = 1;
    return 1;
}
/*
 * Generate code for a binary operator expression.
 */
static void generate_binary_expression(CodeAttribute *ca, BinaryExpression be){
    generate_expression_code(ca, be.left);
    generate_expression_code(ca, be.right);
    switch(be.kind){
    case ADD_OPERATOR:
        add_code(ca, IADD);
        break;
    case SUB_OPERATOR:
        add_code(ca, ISUB);
        break;
    case MUL_OPERATOR:
        add_code(ca, IMUL);
        break;
    case DIV_OPERATOR:
        add_code(ca, IDIV);
        break;
    case MOD_OPERATOR:
        add_code(ca, IREM);
    }
}
intercodes gen_binop(kind2 kind, operand result, operand op1, operand op2)
{
    intercodes ic = (intercodes)malloc(sizeof(struct InterCodes));
    ic->prev = ic->next = NULL;
    ic->code = (intercode)malloc(sizeof(struct InterCode));
    ic->code->kind = kind;
    ic->code->u.binop.result = result;
    ic->code->u.binop.op1 = op1;
    ic->code->u.binop.op2 = op2;
    add_code(ic);
    return ic;
}
static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf,
                          unsigned long pc, int cpu_mode,
                          unsigned int *ibs, int ibs_code)
{
    struct task_struct *task;

    cpu_buf->sample_received++;

    if (nr_available_slots(cpu_buf) < 14) {
        cpu_buf->sample_lost_overflow++;
        return 0;
    }

    task = current;

    /* notice a switch from user->kernel or vice versa */
    if (cpu_buf->last_cpu_mode != cpu_mode) {
        cpu_buf->last_cpu_mode = cpu_mode;
        add_code(cpu_buf, cpu_mode);
    }

    /* notice a task switch */
    /* if not processing other domain samples */
    if ((cpu_buf->last_task != task) &&
        (current_domain == COORDINATOR_DOMAIN)) {
        cpu_buf->last_task = task;
        add_code(cpu_buf, (unsigned long)task);
    }

    add_code(cpu_buf, ibs_code);
    add_sample(cpu_buf, ibs[0], ibs[1]);
    add_sample(cpu_buf, ibs[2], ibs[3]);
    add_sample(cpu_buf, ibs[4], ibs[5]);

    if (ibs_code == IBS_OP_BEGIN) {
        add_sample(cpu_buf, ibs[6], ibs[7]);
        add_sample(cpu_buf, ibs[8], ibs[9]);
        add_sample(cpu_buf, ibs[10], ibs[11]);
    }

    return 1;
}
void translate(Node* h)
{
    if(h==NULL) return;
    switch(h->type)
    {
    case Specifier:
        return;
    case FunDec:
    {
        func_d* temp=find_function(h->child[0]->name);
        add_code(3,"FUNCTION",h->child[0]->name,":");
        for(int i=0;i<temp->parameter_count;i++)
            add_code(2,"PARAM",temp->parameter_list[i]->name);
        current_func=temp;
        break;
    }
    case Dec:
    {
        Node* p=h->child[0]->child[0];
        if(p->type!=_ID)
        {
            p=p->child[0];
            if(p->type!=_ID)
            {
                printf("Cannot translate: Code contains variables of "
                       "multi-dimensional array type or parameters of array type.\n");
                exit(0);
            }
        }
        val_d* temp=find_value(p->name);
        if(temp->kind==USER_DEFINED)
        {
            char length[8];
            itoa(struct_get_size(temp->val_type),length,10);
            add_code(3,"DEC",temp->name,length);
        }
        if(h->child_count==3)
        {
            translate_exp(h->child[2],temp->name,1);
        }
        break;
    }
intercodes gen_triop(relop_type type, operand c1, operand c2, operand label)
{
    intercodes ic = (intercodes)malloc(sizeof(struct InterCodes));
    ic->prev = ic->next = NULL;
    ic->code = (intercode)malloc(sizeof(struct InterCode));
    ic->code->kind = IFGOTO_K;
    ic->code->u.triop.reltype = type;
    ic->code->u.triop.label = label;
    ic->code->u.triop.c1 = c1;
    ic->code->u.triop.c2 = c2;
    add_code(ic);
    return ic;
}
static u4 create_attribute_code(CodeAttribute *ca, Statement *statement, FunctionPattern *pattern_list){
    ca->max_stack = 0;
    ca->max_locals = 1;
    ca->code_length = 0;
    ca->exception_table_length = 0;
    ca->exception_table = NULL;

    /* generate prefix code */
    if(statement->type == MAIN_STATEMENT){
        add_code(ca, GETSTATIC,
                 add_constant_field_info("java/lang/System", "out",
                                         "Ljava/io/PrintStream;"));
        if(ca->parameter_name)
            generate_main_prefix_code(ca);
    }

    /* generate function pattern */
    while(pattern_list){
        generate_function_pattern_code(ca, pattern_list);
        pattern_list = pattern_list->next;
    }

    /* generate function */
    switch(statement->type){
    case CONSTRUCTOR_STATEMENT:
        generate_constructor_code(ca);
        break;
    case MAIN_STATEMENT:
        generate_expression_code(ca, statement->expression);
        add_code(ca, INVOKEVIRTUAL,
                 add_constant_method_info("java/io/PrintStream", "println", "(I)V"));
        add_code(ca, RETURN);
        break;
    case FUNCTION_STATEMENT:
        generate_expression_code(ca, statement->expression);
        add_code(ca, IRETURN);
        break;
    default:
        break;
    }

    return 12 + ca->code_length;
}
int oprofile_add_domain_switch(int32_t domain_id)
{
    struct oprofile_cpu_buffer *cpu_buf = &cpu_buffer[smp_processor_id()];

    /* should have space for switching into and out of domain
       (2 slots each) plus one sample and one cpu mode switch */
    if (((nr_available_slots(cpu_buf) < 6) &&
         (domain_id != COORDINATOR_DOMAIN)) ||
        (nr_available_slots(cpu_buf) < 2))
        return 0;

    add_code(cpu_buf, CPU_DOMAIN_SWITCH);
    add_sample(cpu_buf, domain_id, 0);

    current_domain = domain_id;

    return 1;
}
/*
 * This serves to add an escape code to indicate switching into
 * user space during tracing across the system call boundary
 */
int oprofile_syscall_trace_boundary(void)
{
    struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

    if (!cpu_buf || !cpu_buf->tracing)
        return 0;

    if (nr_available_slots(cpu_buf) < 1) {
        cpu_buf->tracing = 0;
        cpu_buf->sample_lost_overflow++;
        return 0;
    }

    /* Set buffer state to user to prevent traces from being filtered out */
    cpu_buf->last_is_kernel = 0;
    add_code(cpu_buf, CPU_IS_USER);
    return 1;
}
spcs_s_status_t spcs_s_ocopyoutf(spcs_s_info_t *kstatus_a, spcs_s_info_t ustatus,
    spcs_s_status_t stcode, ...)
{
    spcs_s_udata_t ret;
    va_list ap;
    spcs_s_udata_t c;
    spcs_s_pinfo_t *p;
    char *sp;

#ifdef UNISTAT_TRACE
    cmn_err(CE_WARN, "!spcs_s_ocopyoutf entry");
#endif
    p = *(spcs_s_pinfo_t **)kstatus_a;
    c.s = stcode;

    if (check_revision(ustatus) == B_FALSE) {
        ret.s = EINVAL;
    } else {
        if (stcode) {
            if (add_code(p, stcode) == B_FALSE) {
                va_start(ap, stcode);
                while (c.f.sup_count--) {
                    sp = va_arg(ap, caddr_t);
                    if (sp != (char *)NULL)
                        add_item(p, sp);
                }
                va_end(ap);
            }
        }
        ret.s = p->icount ? p->idata[last_code_idx(p)].s : SPCS_S_OK;
        scopyout(p, (spcs_s_pinfo_t *)ustatus);
    }

    spcs_s_kfree((spcs_s_info_t)p);
    *kstatus_a = NULL;
#ifdef UNISTAT_TRACE
    cmn_err(CE_WARN, "!spcs_s_ocopyoutf exit");
#endif
    return (ret.s);
}
void cheatsInstallCodesForEngine()
{
    if(activeGame != NULL)
    {
        int i;
        u32 addr, val;
        int nextCodeCanBeHook = 1;

        SetupERL();

        cheatsCheat_t *cheat = activeGame->cheats;
        while(cheat)
        {
            if(cheat->enabled)
            {
                for(i = 0; i < cheat->numCodeLines; ++i)
                {
                    addr = (u32)*((u32 *)cheat->codeLines + 2*i);
                    val = (u32)*((u32 *)cheat->codeLines + 2*i + 1);

                    if(((addr & 0xfe000000) == 0x90000000) && nextCodeCanBeHook == 1)
                    {
                        printf("hook: %08X %08X\n", addr, val);
                        add_hook(addr, val);
                    }
                    else
                    {
                        printf("code: %08X %08X\n", addr, val);
                        add_code(addr, val);
                    }

                    if ((addr & 0xf0000000) == 0x40000000 ||
                        (addr & 0xf0000000) == 0x30000000)
                        nextCodeCanBeHook = 0;
                    else
                        nextCodeCanBeHook = 1;
                }
            }
            cheat = cheat->next;
        }
    }
}
/*
 * Generate code that reverses the sign of a number (unary minus).
 */
static void generate_minus_expression(CodeAttribute *ca, Expression *me){
    generate_expression_code(ca, me);
    add_code(ca, INEG);
}