/*
 * Invert the sense of the branch that ends block 'b' by exchanging
 * its true and false successor edges.
 */
static void
opt_not(struct block *b)
{
	struct block *swap = JF(b);

	JF(b) = JT(b);
	JT(b) = swap;
}
/*
 * If edge 'ep' statically determines the outcome of the branch that
 * ends block 'child', return the successor block the branch must take;
 * otherwise return 0.  'ep->code' holds the branch opcode of the
 * edge's predecessor, negated when the edge is its false branch.
 */
static struct block *
fold_edge(struct block *child, struct edge *ep)
{
	int sense;
	int aval0, aval1, oval0, oval1;
	int code = ep->code;

	/* A negative code marks the false branch of the predecessor. */
	if (code < 0) {
		code = -code;
		sense = 0;
	} else
		sense = 1;

	/* The child must end with the same branch opcode. */
	if (child->s.code != code)
		return 0;

	aval0 = child->val[A_ATOM];
	oval0 = child->oval;
	aval1 = ep->pred->val[A_ATOM];
	oval1 = ep->pred->oval;

	/* The accumulator must hold the same value number at both tests. */
	if (aval0 != aval1)
		return 0;

	if (oval0 == oval1)
		/*
		 * The operands of the branch instructions are
		 * identical, so the result is true if a true
		 * branch was taken to get here, otherwise false.
		 */
		return sense ? JT(child) : JF(child);

	if (sense && code == (BPF_JMP|BPF_JEQ|BPF_K))
		/*
		 * At this point, we only know the comparison if we
		 * came down the true branch, and it was an equality
		 * comparison with a constant.
		 *
		 * I.e., if we came down the true branch, and the branch
		 * was an equality comparison with a constant, we know the
		 * accumulator contains that constant.  If we came down
		 * the false branch, or the comparison wasn't with a
		 * constant, we don't know what was in the accumulator.
		 *
		 * We rely on the fact that distinct constants have distinct
		 * value numbers.
		 */
		return JF(child);

	return 0;
}
/*
 * Find dominator relationships.
 * Assumes graph has been leveled.
 *
 * Each block's 'dom' set is a bit vector of block ids: bit j set
 * means block j dominates this block.  All sets start full and are
 * narrowed by intersecting along every edge, walking the levels
 * top-down from the root.
 */
static void
find_dom(struct block *root)
{
	int i;
	struct block *b;
	bpf_u_int32 *x;

	/*
	 * Initialize sets to contain all nodes.
	 */
	x = all_dom_sets;
	i = n_blocks * nodewords;
	while (--i >= 0)
		*x++ = ~0;
	/* Root starts off empty. */
	for (i = nodewords; --i >= 0;)
		root->dom[i] = 0;

	/* root->level is the highest level number found. */
	for (i = root->level; i >= 0; --i) {
		for (b = levels[i]; b; b = b->link) {
			SET_INSERT(b->dom, b->id);
			if (JT(b) == 0)
				continue;
			SET_INTERSECT(JT(b)->dom, b->dom, nodewords);
			SET_INTERSECT(JF(b)->dom, b->dom, nodewords);
		}
	}
}
/*
 * Return the number of basic blocks reachable from 'p'.
 * Every node must be unmarked on entry; each visited node is marked
 * so that shared subgraphs are counted exactly once.
 */
static int
count_blocks(struct block *p)
{
	int total;

	if (p == 0 || isMarked(p))
		return 0;
	Mark(p);
	total = 1;			/* this block */
	total += count_blocks(JT(p));	/* true branch */
	total += count_blocks(JF(p));	/* false branch */
	return total;
}
/**
 * Register this namespace's spawner-related native methods with the JVM.
 * @return true if JNI registration succeeded.
 */
bool ScriptMethodsSpawnerNamespace::install()
{
	const JNINativeMethod NATIVES[] = {
		// JF pairs a Java method name/signature with its C++ entry point.
#define JF(a,b,c) {a,b,(void*)(ScriptMethodsSpawnerNamespace::c)}
		JF("getServerSpawnLimit", "()I", getServerSpawnLimit),
		// Undefine JF so it cannot leak past this function and collide
		// with the other JF macro definitions in this translation unit.
#undef JF
	};
	return JavaLibrary::registerNatives(NATIVES, sizeof(NATIVES)/sizeof(NATIVES[0]));
}
/**
 * Register this namespace's remote-debug native methods with the JVM.
 * @return true if JNI registration succeeded.
 */
bool ScriptMethodsRemoteDebugNamespace::install()
{
	const JNINativeMethod NATIVES[] = {
		// JF pairs a Java method name/signature with its C++ entry point.
#define JF(a,b,c) {a,b,(void*)(ScriptMethodsRemoteDebugNamespace::c)}
		JF("printChannelMessage", "(Ljava/lang/String;Ljava/lang/String;)V", printChannelMessage),
		// Undefine JF so it cannot leak past this function and collide
		// with the other JF macro definitions in this translation unit.
#undef JF
	};
	return JavaLibrary::registerNatives(NATIVES, sizeof(NATIVES)/sizeof(NATIVES[0]));
}
/*
 * Merge equivalent basic blocks: any reachable block that is equal
 * (per eq_blk()) to a later block gets linked to a canonical
 * representative, and every jump edge is then redirected to that
 * representative.  Repeats until a full pass changes nothing.
 */
static void
intern_blocks(struct block *root)
{
	struct block *p;
	int i, j;
	int done1; /* don't shadow global */
 top:
	done1 = 1;
	/* Clear equivalence links left over from a previous pass. */
	for (i = 0; i < n_blocks; ++i)
		blocks[i]->link = 0;

	mark_code(root);

	/*
	 * For each reachable block (the last one has no later blocks
	 * to match, so it is skipped), link it to the canonical
	 * representative of the first equal later block.
	 */
	for (i = n_blocks - 1; --i >= 0; ) {
		if (!isMarked(blocks[i]))
			continue;
		for (j = i + 1; j < n_blocks; ++j) {
			if (!isMarked(blocks[j]))
				continue;
			if (eq_blk(blocks[i], blocks[j])) {
				blocks[i]->link = blocks[j]->link ?
					blocks[j]->link : blocks[j];
				break;
			}
		}
	}

	/* Redirect both branch targets of every block to canonicals. */
	for (i = 0; i < n_blocks; ++i) {
		p = blocks[i];
		if (JT(p) == 0)
			continue;
		if (JT(p)->link) {
			done1 = 0;
			JT(p) = JT(p)->link;
		}
		if (JF(p)->link) {
			done1 = 0;
			JF(p) = JF(p)->link;
		}
	}
	if (!done1)
		goto top;
}
/*
 * Mark every block reachable from 'p'.  Recursion stops at return
 * blocks (which have no successors) and at already-marked blocks.
 */
static void
make_marks(struct block *p)
{
	if (isMarked(p))
		return;
	Mark(p);
	if (BPF_CLASS(p->s.code) == BPF_RET)
		return;		/* leaf: no successors to visit */
	make_marks(JT(p));
	make_marks(JF(p));
}
/* * Return the number of stmts in the flowgraph reachable by 'p'. * The nodes should be unmarked before calling. * * Note that "stmts" means "instructions", and that this includes * * side-effect statements in 'p' (slength(p->stmts)); * * statements in the true branch from 'p' (count_stmts(JT(p))); * * statements in the false branch from 'p' (count_stmts(JF(p))); * * the conditional jump itself (1); * * an extra long jump if the true branch requires it (p->longjt); * * an extra long jump if the false branch requires it (p->longjf). */ static u_int count_stmts(struct block *p) { u_int n; if (p == 0 || isMarked(p)) return 0; Mark(p); n = count_stmts(JT(p)) + count_stmts(JF(p)); return slength(p->stmts) + n + 1 + p->longjt + p->longjf; }
static void opt_j(struct edge *ep) { register int i, k; register struct block *target; if (JT(ep->succ) == 0) return; if (JT(ep->succ) == JF(ep->succ)) { /* * Common branch targets can be eliminated, provided * there is no data dependency. */ if (!use_conflict(ep->pred, ep->succ->et.succ)) { done = 0; ep->succ = JT(ep->succ); } } /* * For each edge dominator that matches the successor of this * edge, promote the edge successor to the its grandchild. * * XXX We violate the set abstraction here in favor a reasonably * efficient loop. */ top: for (i = 0; i < edgewords; ++i) { register bpf_u_int32 x = ep->edom[i]; while (x != 0) { k = ffs(x) - 1; x &=~ (1 << k); k += i * BITS_PER_WORD; target = fold_edge(ep->succ, edges[k]); /* * Check that there is no data dependency between * nodes that will be violated if we move the edge. */ if (target != 0 && !use_conflict(ep->pred, target)) { done = 0; ep->succ = target; if (JT(target) != 0) /* * Start over unless we hit a leaf. */ goto top; return; } } } }
static void find_levels_r(struct block *b) { int level; if (isMarked(b)) return; Mark(b); b->link = 0; if (JT(b)) { find_levels_r(JT(b)); find_levels_r(JF(b)); level = MAX(JT(b)->level, JF(b)->level) + 1; } else level = 0; b->level = level; b->link = levels[level]; levels[level] = b; }
/*
 * Depth-first walk of the flow graph: give each basic block a unique
 * id and record it in the global 'blocks' array.
 */
static void
number_blks_r(struct block *p)
{
	if (p == 0 || isMarked(p))
		return;
	Mark(p);

	p->id = n_blocks;
	blocks[n_blocks] = p;
	++n_blocks;

	number_blks_r(JT(p));
	number_blks_r(JF(p));
}
/**
 * Register this namespace's system-level native methods with the JVM.
 * @return true if JNI registration succeeded.
 */
bool ScriptMethodsSystemNamespace::install()
{
	const JNINativeMethod NATIVES[] = {
		// JF pairs a Java method name/signature with its C++ entry point.
#define JF(a,b,c) {a,b,(void*)(ScriptMethodsSystemNamespace::c)}
		JF("_sendConsoleCommand", "(Ljava/lang/String;J)Z", sendConsoleCommand),
		JF("getConfigSetting", "(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;", getConfigSetting),
		JF("_fixHouseItemLimit", "(J)Z", fixHouseItemLimit),
		JF("_saveTextOnClient", "(JLjava/lang/String;Ljava/lang/String;)V", saveTextOnClient),
		JF("_saveBytesOnClient", "(JLjava/lang/String;[B)V", saveBytesOnClient),
		JF("_launchClientWebBrowser", "(JLjava/lang/String;)V", launchClientWebBrowser),
		JF("_playCutScene", "(JLjava/lang/String;)V", playCutScene),
		// Undefine JF so it cannot leak past this function and collide
		// with the other JF macro definitions in this translation unit.
#undef JF
	};
	return JavaLibrary::registerNatives(NATIVES, sizeof(NATIVES)/sizeof(NATIVES[0]));
}
/*
 * Test driver for the JSON parser.  JS(s) exercises an input that is
 * expected to parse successfully; JF(s) exercises an input that is
 * expected to be rejected ("should give null" per the inline comment).
 * NOTE(review): JS and JF are macros defined elsewhere in this file --
 * confirm their exact pass/fail semantics there.  main() relies on the
 * implicit "return 0" of C99/C++.
 */
int main(int argc, char **argv) { JS("null"); JS("true"); JS("false"); JS("0"); JS("1"); JS("-1"); JS("1.0"); JS("-1.0"); #ifdef JX_64_BIT JS("1.0000100000000001"); JS("-1.0000100000000001"); JS("9223372036854775807"); JS("-9223372036854775808"); #else JS("1.00001"); JS("-1.00001"); JS("2147483647"); JS("-2147483648"); #endif JS("[]"); JS("{}"); JS("\"hi\""); JS("[null]"); JS("[true]"); JS("[false]"); JS("[1]"); JS("[-1]"); JS("[\"one\"]"); JS("{1:2}"); JS("{-1:-2}"); JS("{\"one\":\"two\"}"); JS("[1,2]"); JS("[-1,-2]"); JS("[1,[2]]"); JS("[1,[2,3]]"); JS("{1:{2:3}}"); JS("[1,[2,3,4,5]]"); JS("{1:{2:3,4:5}}"); JS("[1,[2,\"three\",4,5]]"); /* below are all invalid and should give null */ JF(""); JF("-"); JF("{[]:1}"); JF("{{}:1}"); JF("{[1,2]:1}"); JF("{[1,2]:[3,4]}"); }
/*
 * Rebuild the predecessor (in-edge) lists of every block reachable
 * from 'root'.  Assumes the graph has already been leveled.
 */
static void
find_inedges(struct block *root)
{
	int n;
	struct block *blk;

	/* Clear every block's predecessor list first. */
	for (n = 0; n < n_blocks; ++n)
		blocks[n]->in_edges = 0;

	/*
	 * Walk the graph level by level, linking each block's two
	 * outgoing edges onto the predecessor lists of their
	 * successors.  The leaves (level 0) have no successors,
	 * so they are skipped.
	 */
	for (n = root->level; n > 0; --n) {
		for (blk = levels[n]; blk != 0; blk = blk->link) {
			link_inedge(&blk->et, JT(blk));
			link_inedge(&blk->ef, JF(blk));
		}
	}
}
static void opt_root(struct block **b) { struct slist *tmp, *s; s = (*b)->stmts; (*b)->stmts = 0; while (BPF_CLASS((*b)->s.code) == BPF_JMP && JT(*b) == JF(*b)) *b = JT(*b); tmp = (*b)->stmts; if (tmp != 0) sappend(s, tmp); (*b)->stmts = s; /* * If the root node is a return, then there is no * point executing any statements (since the bpf machine * has no side effects). */ if (BPF_CLASS((*b)->s.code) == BPF_RET) (*b)->stmts = 0; }
/*
 * Find the backwards transitive closure of the flow graph.  These sets
 * are backwards in the sense that we find the set of nodes that reach
 * a given node, not the set of nodes that can be reached by a node.
 *
 * Assumes graph has been leveled.
 *
 * Each block's 'closure' set is a bit vector of block ids, built by
 * unioning each block's set into both of its successors, walking the
 * levels top-down from the root.
 */
static void
find_closure(struct block *root)
{
	int i;
	struct block *b;

	/*
	 * Initialize sets to contain no nodes.
	 */
	memset((char *)all_closure_sets, 0,
	       n_blocks * nodewords * sizeof(*all_closure_sets));

	/* root->level is the highest level number found. */
	for (i = root->level; i >= 0; --i) {
		for (b = levels[i]; b; b = b->link) {
			SET_INSERT(b->closure, b->id);
			if (JT(b) == 0)
				continue;
			SET_UNION(JT(b)->closure, b->closure, nodewords);
			SET_UNION(JF(b)->closure, b->closure, nodewords);
		}
	}
}
/*
 * Compute use/def information for every block.
 * Assume graph is already leveled.
 *
 * The first pass computes each block's local use/def sets and clears
 * its out_use; the second pass walks the levels bottom-up, propagating
 * each block's successors' in_use into its out_use, so every block
 * learns which atoms (A, X, scratch memory) are live on exit.
 */
static void
find_ud(struct block *root)
{
	int i, maxlevel;
	struct block *p;

	/*
	 * root->level is the highest level number found;
	 * count down from there.
	 */
	maxlevel = root->level;
	for (i = maxlevel; i >= 0; --i)
		for (p = levels[i]; p; p = p->link) {
			compute_local_ud(p);
			p->out_use = 0;
		}

	for (i = 1; i <= maxlevel; ++i) {
		for (p = levels[i]; p; p = p->link) {
			p->out_use |= JT(p)->in_use | JF(p)->in_use;
			p->in_use |= p->out_use &~ p->kill;
		}
	}
}
/*
 * Peephole optimizations on the statement list and final branch of
 * block 'b': rewrites st/ldx pairs into tax, ld/tax into ldx/txa,
 * folds the ldi/addx/tax/ild "tcp[k]" pattern, merges constant
 * subtracts and ANDs into the branch comparison, simplifies jset
 * against 0 / 0xffffffff, and evaluates the branch outright when the
 * A or X register holds a known constant.  Clears the global 'done'
 * flag whenever it changes anything so the optimizer loop re-runs.
 * NOTE(review): relies on the globals 'done' and 'vmap' (value-number
 * map) defined elsewhere in this file.
 */
static void opt_peep(struct block *b) { struct slist *s; struct slist *next, *last; int val; s = b->stmts; if (s == 0) return; last = s; for (/*empty*/; /*empty*/; s = next) { /* * Skip over nops. */ s = this_op(s); if (s == 0) break; /* nothing left in the block */ /* * Find the next real instruction after that one * (skipping nops). */ next = this_op(s->next); if (next == 0) break; /* no next instruction */ last = next; /* * st M[k] --> st M[k] * ldx M[k] tax */ if (s->s.code == BPF_ST && next->s.code == (BPF_LDX|BPF_MEM) && s->s.k == next->s.k) { done = 0; next->s.code = BPF_MISC|BPF_TAX; } /* * ld #k --> ldx #k * tax txa */ if (s->s.code == (BPF_LD|BPF_IMM) && next->s.code == (BPF_MISC|BPF_TAX)) { s->s.code = BPF_LDX|BPF_IMM; next->s.code = BPF_MISC|BPF_TXA; done = 0; } /* * This is an ugly special case, but it happens * when you say tcp[k] or udp[k] where k is a constant. */ if (s->s.code == (BPF_LD|BPF_IMM)) { struct slist *add, *tax, *ild; /* * Check that X isn't used on exit from this * block (which the optimizer might cause). * We know the code generator won't generate * any local dependencies. */ if (ATOMELEM(b->out_use, X_ATOM)) continue; /* * Check that the instruction following the ldi * is an addx, or it's an ldxms with an addx * following it (with 0 or more nops between the * ldxms and addx). */ if (next->s.code != (BPF_LDX|BPF_MSH|BPF_B)) add = next; else add = this_op(next->next); if (add == 0 || add->s.code != (BPF_ALU|BPF_ADD|BPF_X)) continue; /* * Check that a tax follows that (with 0 or more * nops between them). */ tax = this_op(add->next); if (tax == 0 || tax->s.code != (BPF_MISC|BPF_TAX)) continue; /* * Check that an ild follows that (with 0 or more * nops between them). 
*/ ild = this_op(tax->next); if (ild == 0 || BPF_CLASS(ild->s.code) != BPF_LD || BPF_MODE(ild->s.code) != BPF_IND) continue; /* * We want to turn this sequence: * * (004) ldi #0x2 {s} * (005) ldxms [14] {next} -- optional * (006) addx {add} * (007) tax {tax} * (008) ild [x+0] {ild} * * into this sequence: * * (004) nop * (005) ldxms [14] * (006) nop * (007) nop * (008) ild [x+2] * * XXX We need to check that X is not * subsequently used, because we want to change * what'll be in it after this sequence. * * We know we can eliminate the accumulator * modifications earlier in the sequence since * it is defined by the last stmt of this sequence * (i.e., the last statement of the sequence loads * a value into the accumulator, so we can eliminate * earlier operations on the accumulator). */ ild->s.k += s->s.k; s->s.code = NOP; add->s.code = NOP; tax->s.code = NOP; done = 0; } } /* * If the comparison at the end of a block is an equality * comparison against a constant, and nobody uses the value * we leave in the A register at the end of a block, and * the operation preceding the comparison is an arithmetic * operation, we can sometime optimize it away. */ if (b->s.code == (BPF_JMP|BPF_JEQ|BPF_K) && !ATOMELEM(b->out_use, A_ATOM)) { /* * We can optimize away certain subtractions of the * X register. 
*/ if (last->s.code == (BPF_ALU|BPF_SUB|BPF_X)) { val = b->val[X_ATOM]; if (vmap[val].is_const) { /* * If we have a subtract to do a comparison, * and the X register is a known constant, * we can merge this value into the * comparison: * * sub x -> nop * jeq #y jeq #(x+y) */ b->s.k += vmap[val].const_val; last->s.code = NOP; done = 0; } else if (b->s.k == 0) { /* * If the X register isn't a constant, * and the comparison in the test is * against 0, we can compare with the * X register, instead: * * sub x -> nop * jeq #0 jeq x */ last->s.code = NOP; b->s.code = BPF_JMP|BPF_JEQ|BPF_X; done = 0; } } /* * Likewise, a constant subtract can be simplified: * * sub #x -> nop * jeq #y -> jeq #(x+y) */ else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K)) { last->s.code = NOP; b->s.k += last->s.k; done = 0; } /* * And, similarly, a constant AND can be simplified * if we're testing against 0, i.e.: * * and #k nop * jeq #0 -> jset #k */ else if (last->s.code == (BPF_ALU|BPF_AND|BPF_K) && b->s.k == 0) { b->s.k = last->s.k; b->s.code = BPF_JMP|BPF_K|BPF_JSET; last->s.code = NOP; done = 0; opt_not(b); } } /* * jset #0 -> never * jset #ffffffff -> always */ if (b->s.code == (BPF_JMP|BPF_K|BPF_JSET)) { if (b->s.k == 0) JT(b) = JF(b); if (b->s.k == (int)0xffffffff) JF(b) = JT(b); } /* * If we're comparing against the index register, and the index * register is a known constant, we can just compare against that * constant. */ val = b->val[X_ATOM]; if (vmap[val].is_const && BPF_SRC(b->s.code) == BPF_X) { bpf_int32 v = vmap[val].const_val; b->s.code &= ~BPF_X; b->s.k = v; } /* * If the accumulator is a known constant, we can compute the * comparison result. 
*/ val = b->val[A_ATOM]; if (vmap[val].is_const && BPF_SRC(b->s.code) == BPF_K) { bpf_int32 v = vmap[val].const_val; switch (BPF_OP(b->s.code)) { case BPF_JEQ: v = v == b->s.k; break; case BPF_JGT: v = (unsigned)v > (unsigned)b->s.k; break; case BPF_JGE: v = (unsigned)v >= (unsigned)b->s.k; break; case BPF_JSET: v &= b->s.k; break; default: abort(); } if (JF(b) != JT(b)) done = 0; if (v) JF(b) = JT(b); else JT(b) = JF(b); } }
/*
 * seq_file "show" callback: print the reiserfs journal's on-disk
 * parameters (jp_*), in-core journal state (j_*), and journal proc
 * statistics for the superblock stored in m->private.  Always
 * returns 0.
 * NOTE(review): DJP(), JF() and SFPJ() are field-accessor macros
 * defined elsewhere in this file -- confirm their definitions there;
 * in particular verify that the SFPJ(journal_being) argument is the
 * intended match for the "journal_begin" label in the format string.
 */
static int show_journal(struct seq_file *m, void *unused) { struct super_block *sb = m->private; struct reiserfs_sb_info *r = REISERFS_SB(sb); struct reiserfs_super_block *rs = r->s_rs; struct journal_params *jp = &rs->s_v1.s_journal; char b[BDEVNAME_SIZE]; seq_printf(m, /* on-disk fields */ "jp_journal_1st_block: \t%i\n" "jp_journal_dev: \t%s[%x]\n" "jp_journal_size: \t%i\n" "jp_journal_trans_max: \t%i\n" "jp_journal_magic: \t%i\n" "jp_journal_max_batch: \t%i\n" "jp_journal_max_commit_age: \t%i\n" "jp_journal_max_trans_age: \t%i\n" /* incore fields */ "j_1st_reserved_block: \t%i\n" "j_state: \t%li\n" "j_trans_id: \t%u\n" "j_mount_id: \t%lu\n" "j_start: \t%lu\n" "j_len: \t%lu\n" "j_len_alloc: \t%lu\n" "j_wcount: \t%i\n" "j_bcount: \t%lu\n" "j_first_unflushed_offset: \t%lu\n" "j_last_flush_trans_id: \t%u\n" "j_trans_start_time: \t%li\n" "j_list_bitmap_index: \t%i\n" "j_must_wait: \t%i\n" "j_next_full_flush: \t%i\n" "j_next_async_flush: \t%i\n" "j_cnode_used: \t%i\n" "j_cnode_free: \t%i\n" "\n" /* reiserfs_proc_info_data_t.journal fields */ "in_journal: \t%12lu\n" "in_journal_bitmap: \t%12lu\n" "in_journal_reusable: \t%12lu\n" "lock_journal: \t%12lu\n" "lock_journal_wait: \t%12lu\n" "journal_begin: \t%12lu\n" "journal_relock_writers: \t%12lu\n" "journal_relock_wcount: \t%12lu\n" "mark_dirty: \t%12lu\n" "mark_dirty_already: \t%12lu\n" "mark_dirty_notjournal: \t%12lu\n" "restore_prepared: \t%12lu\n" "prepare: \t%12lu\n" "prepare_retry: \t%12lu\n", DJP(jp_journal_1st_block), bdevname(SB_JOURNAL(sb)->j_dev_bd, b), DJP(jp_journal_dev), DJP(jp_journal_size), DJP(jp_journal_trans_max), DJP(jp_journal_magic), DJP(jp_journal_max_batch), SB_JOURNAL(sb)->j_max_commit_age, DJP(jp_journal_max_trans_age), JF(j_1st_reserved_block), JF(j_state), JF(j_trans_id), JF(j_mount_id), JF(j_start), JF(j_len), JF(j_len_alloc), atomic_read(&r->s_journal->j_wcount), JF(j_bcount), JF(j_first_unflushed_offset), JF(j_last_flush_trans_id), JF(j_trans_start_time), JF(j_list_bitmap_index), 
JF(j_must_wait), JF(j_next_full_flush), JF(j_next_async_flush), JF(j_cnode_used), JF(j_cnode_free), SFPJ(in_journal), SFPJ(in_journal_bitmap), SFPJ(in_journal_reusable), SFPJ(lock_journal), SFPJ(lock_journal_wait), SFPJ(journal_being), SFPJ(journal_relock_writers), SFPJ(journal_relock_wcount), SFPJ(mark_dirty), SFPJ(mark_dirty_already), SFPJ(mark_dirty_notjournal), SFPJ(restore_prepared), SFPJ(prepare), SFPJ(prepare_retry) ); return 0; }
/**
 * Register this namespace's chat-related native methods with the JVM.
 * @return true if JNI registration succeeded.
 */
bool ScriptMethodsChatNamespace::install()
{
	const JNINativeMethod NATIVES[] = {
		// JF pairs a Java method name/signature with its C++ entry point.
#define JF(a,b,c) {a,b,(void*)(ScriptMethodsChatNamespace::c)}
		JF("_chatCreateRoom", "(Ljava/lang/String;ZLjava/lang/String;Ljava/lang/String;)V", chatCreateRoom),
		JF("chatDestroyRoom", "(Ljava/lang/String;)V", chatDestroyRoom),
		JF("_chatEnterRoom", "(Ljava/lang/String;Ljava/lang/String;)V", chatEnterRoom),
		JF("_chatExitRoom", "(Ljava/lang/String;Ljava/lang/String;)V", chatExitRoom),
		JF("chatAddModeratorToRoom", "(Ljava/lang/String;Ljava/lang/String;)V", chatAddModeratorToRoom),
		JF("chatRemoveModeratorFromRoom", "(Ljava/lang/String;Ljava/lang/String;)V", chatRemoveModeratorFromRoom),
		JF("_chatSendInstantMessage", "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;)V", chatSendInstantMessage),
		JF("_chatSendPersistentMessage", "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;)V", chatSendPersistentMessage),
		JF("_chatSendToRoom", "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;)V", chatSendToRoom),
		JF("__packOutOfBandToken", "(JLjava/lang/String;I)Ljava/lang/String;", chatPackOutOfBandToken),
		JF("__packOutOfBandWaypoint", "(JLjava/lang/String;I)Ljava/lang/String;", chatPackOutOfBandWaypoint),
		JF("_packOutOfBandWaypointData", "(Ljava/lang/String;ILjava/lang/String;FFLscript/string_id;Ljava/lang/String;)Ljava/lang/String;", chatPackOutOfBandWaypointData),
		JF("__packOutOfBandProsePackage", "(Ljava/lang/String;ILscript/string_id;JJJLscript/string_id;Lscript/string_id;Lscript/string_id;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;IFZ)Ljava/lang/String;", packOutOfBandProsePackage),
		JF("sendSystemMessage", "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;)V", chatSendSystemMessage),
		JF("_sendSystemMessage", "(JLjava/lang/String;Ljava/lang/String;)V", chatSendSystemMessageObjId),
		JF("getEmoteFromCrc", "(I)Ljava/lang/String;", getEmoteFromCrc),
		JF("_sendQuestSystemMessage", "(JLjava/lang/String;Ljava/lang/String;)V", chatSendQuestSystemMessage),
		JF("sendFactionalSystemMessagePlanet", "(Ljava/lang/String;Lscript/location;FZZ)V", chatSendFactionalSystemMessagePlanet),
		// Undefine JF so it cannot leak past this function and collide
		// with the other JF macro definitions in this translation unit.
#undef JF
	};
	return JavaLibrary::registerNatives(NATIVES, sizeof(NATIVES)/sizeof(NATIVES[0]));
}
/*
 * NOTE(review): emits instructions into the global buffer via
 * 'ftail'/'fstart' (working backwards), temporarily allocates an
 * 'offset' array to resolve block-local relative jumps, and treats
 * bpf_error() as non-returning.  Recurses over the false branch
 * before the true branch so successors are emitted below 'p'.
 */
/* * Returns true if successful. Returns false if a branch has * an offset that is too large. If so, we have marked that * branch so that on a subsequent iteration, it will be treated * properly. */ static int convert_code_r(struct block *p) { struct bpf_insn *dst; struct slist *src; u_int slen; u_int off; int extrajmps; /* number of extra jumps inserted */ struct slist **offset = NULL; if (p == 0 || isMarked(p)) return (1); Mark(p); if (convert_code_r(JF(p)) == 0) return (0); if (convert_code_r(JT(p)) == 0) return (0); slen = slength(p->stmts); dst = ftail -= (slen + 1 + p->longjt + p->longjf); /* inflate length by any extra jumps */ p->offset = dst - fstart; /* generate offset[] for convenience */ if (slen) { offset = (struct slist **)calloc(slen, sizeof(struct slist *)); if (!offset) { bpf_error("not enough core"); /*NOTREACHED*/ } } src = p->stmts; for (off = 0; off < slen && src; off++) { #if 0 printf("off=%d src=%x\n", off, src); #endif offset[off] = src; src = src->next; } off = 0; for (src = p->stmts; src; src = src->next) { if (src->s.code == NOP) continue; dst->code = (u_short)src->s.code; dst->k = src->s.k; /* fill block-local relative jump */ if (BPF_CLASS(src->s.code) != BPF_JMP || src->s.code == (BPF_JMP|BPF_JA)) { #if 0 if (src->s.jt || src->s.jf) { bpf_error("illegal jmp destination"); /*NOTREACHED*/ } #endif goto filled; } if (off == slen - 2) /*???*/ goto filled; { u_int i; int jt, jf; static const char ljerr[] = "%s for block-local relative jump: off=%d"; #if 0 printf("code=%x off=%d %x %x\n", src->s.code, off, src->s.jt, src->s.jf); #endif if (!src->s.jt || !src->s.jf) { bpf_error(ljerr, "no jmp destination", off); /*NOTREACHED*/ } jt = jf = 0; for (i = 0; i < slen; i++) { if (offset[i] == src->s.jt) { if (jt) { bpf_error(ljerr, "multiple matches", off); /*NOTREACHED*/ } dst->jt = i - off - 1; jt++; } if (offset[i] == src->s.jf) { if (jf) { bpf_error(ljerr, "multiple matches", off); /*NOTREACHED*/ } dst->jf = i - off - 1; jf++; } } if (!jt || 
!jf) { bpf_error(ljerr, "no destination found", off); /*NOTREACHED*/ } } filled: ++dst; ++off; } if (offset) free(offset); #ifdef BDEBUG bids[dst - fstart] = p->id + 1; #endif dst->code = (u_short)p->s.code; dst->k = p->s.k; if (JT(p)) { extrajmps = 0; off = JT(p)->offset - (p->offset + slen) - 1; if (off >= 256) { /* offset too large for branch, must add a jump */ if (p->longjt == 0) { /* mark this instruction and retry */ p->longjt++; return(0); } /* branch if T to following jump */ dst->jt = extrajmps; extrajmps++; dst[extrajmps].code = BPF_JMP|BPF_JA; dst[extrajmps].k = off - extrajmps; } else dst->jt = off; off = JF(p)->offset - (p->offset + slen) - 1; if (off >= 256) { /* offset too large for branch, must add a jump */ if (p->longjf == 0) { /* mark this instruction and retry */ p->longjf++; return(0); } /* branch if F to following jump */ /* if two jumps are inserted, F goes to second one */ dst->jf = extrajmps; extrajmps++; dst[extrajmps].code = BPF_JMP|BPF_JA; dst[extrajmps].k = off - extrajmps; } else dst->jf = off; } return (1); }
/*
 * Legacy /proc read handler: format the journal's on-disk fields
 * (s_journal_*), in-core state (j_*) and journal proc statistics into
 * 'buffer', then hand off to reiserfs_proc_tail() for the standard
 * /proc offset/count bookkeeping.  Returns -ENOENT if the device in
 * 'data' no longer maps to a mounted reiserfs superblock.
 * NOTE(review): DJF(), JF() and SFPJ() are field-accessor macros
 * defined elsewhere in this file -- confirm their definitions there.
 */
int reiserfs_journal_in_proc( char *buffer, char **start, off_t offset, int count, int *eof, void *data ) { struct super_block *sb; struct reiserfs_sb_info *r; struct reiserfs_super_block *rs; int len = 0; sb = procinfo_prologue( ( kdev_t ) ( long ) data ); if( sb == NULL ) return -ENOENT; r = &sb->u.reiserfs_sb; rs = r -> s_rs; len += sprintf( &buffer[ len ], /* on-disk fields */ "s_journal_block: \t%i\n" "s_journal_dev: \t%s[%x]\n" "s_orig_journal_size: \t%i\n" "s_journal_trans_max: \t%i\n" "s_journal_block_count: \t%i\n" "s_journal_max_batch: \t%i\n" "s_journal_max_commit_age: \t%i\n" "s_journal_max_trans_age: \t%i\n" /* incore fields */ "j_state: \t%li\n" "j_trans_id: \t%lu\n" "j_mount_id: \t%lu\n" "j_start: \t%lu\n" "j_len: \t%lu\n" "j_len_alloc: \t%lu\n" "j_wcount: \t%i\n" "j_bcount: \t%lu\n" "j_first_unflushed_offset: \t%lu\n" "j_last_flush_trans_id: \t%lu\n" "j_trans_start_time: \t%li\n" "j_journal_list_index: \t%i\n" "j_list_bitmap_index: \t%i\n" "j_must_wait: \t%i\n" "j_next_full_flush: \t%i\n" "j_next_async_flush: \t%i\n" "j_cnode_used: \t%i\n" "j_cnode_free: \t%i\n" "\n" /* reiserfs_proc_info_data_t.journal fields */ "in_journal: \t%12lu\n" "in_journal_bitmap: \t%12lu\n" "in_journal_reusable: \t%12lu\n" "lock_journal: \t%12lu\n" "lock_journal_wait: \t%12lu\n" "journal_begin: \t%12lu\n" "journal_relock_writers: \t%12lu\n" "journal_relock_wcount: \t%12lu\n" "mark_dirty: \t%12lu\n" "mark_dirty_already: \t%12lu\n" "mark_dirty_notjournal: \t%12lu\n" "restore_prepared: \t%12lu\n" "prepare: \t%12lu\n" "prepare_retry: \t%12lu\n", DJF( s_journal_block ), DJF( s_journal_dev ) == 0 ? 
"none" : bdevname( DJF( s_journal_dev ) ), DJF( s_journal_dev ), DJF( s_orig_journal_size ), DJF( s_journal_trans_max ), DJF( s_journal_block_count ), DJF( s_journal_max_batch ), DJF( s_journal_max_commit_age ), DJF( s_journal_max_trans_age ), JF( j_state ), JF( j_trans_id ), JF( j_mount_id ), JF( j_start ), JF( j_len ), JF( j_len_alloc ), atomic_read( & r -> s_journal -> j_wcount ), JF( j_bcount ), JF( j_first_unflushed_offset ), JF( j_last_flush_trans_id ), JF( j_trans_start_time ), JF( j_journal_list_index ), JF( j_list_bitmap_index ), JF( j_must_wait ), JF( j_next_full_flush ), JF( j_next_async_flush ), JF( j_cnode_used ), JF( j_cnode_free ), SFPJ( in_journal ), SFPJ( in_journal_bitmap ), SFPJ( in_journal_reusable ), SFPJ( lock_journal ), SFPJ( lock_journal_wait ), SFPJ( journal_being ), SFPJ( journal_relock_writers ), SFPJ( journal_relock_wcount ), SFPJ( mark_dirty ), SFPJ( mark_dirty_already ), SFPJ( mark_dirty_notjournal ), SFPJ( restore_prepared ), SFPJ( prepare ), SFPJ( prepare_retry ) ); procinfo_epilogue( sb ); return reiserfs_proc_tail( len, buffer, start, offset, count, eof ); }
/*
 * Try to "pull up" a node through a chain of AND-like branches: if
 * every predecessor of 'b' leaves the same value in A, and a chain of
 * blocks dominated by 'b' all share b's false target, the first block
 * in the chain whose A value matches can be hoisted above the
 * differing ones.  Clears the global 'done' flag when the graph
 * changes.
 * NOTE(review): presumably mirrors an or_pullup() counterpart
 * elsewhere in this file, and relies on dominator sets being current
 * -- confirm callers recompute them after graph edits.
 */
static void and_pullup(struct block *b) { int val, at_top; struct block *pull; struct block **diffp, **samep; struct edge *ep; ep = b->in_edges; if (ep == 0) return; /* * Make sure each predecessor loads the same value. */ val = ep->pred->val[A_ATOM]; for (ep = ep->next; ep != 0; ep = ep->next) if (val != ep->pred->val[A_ATOM]) return; if (JT(b->in_edges->pred) == b) diffp = &JT(b->in_edges->pred); else diffp = &JF(b->in_edges->pred); at_top = 1; while (1) { if (*diffp == 0) return; if (JF(*diffp) != JF(b)) return; if (!SET_MEMBER((*diffp)->dom, b->id)) return; if ((*diffp)->val[A_ATOM] != val) break; diffp = &JT(*diffp); at_top = 0; } samep = &JT(*diffp); while (1) { if (*samep == 0) return; if (JF(*samep) != JF(b)) return; if (!SET_MEMBER((*samep)->dom, b->id)) return; if ((*samep)->val[A_ATOM] == val) break; /* XXX Need to check that there are no data dependencies between diffp and samep. Currently, the code generator will not produce such dependencies. */ samep = &JT(*samep); } #ifdef notdef /* XXX This doesn't cover everything. */ for (i = 0; i < N_ATOMS; ++i) if ((*samep)->val[i] != pred->val[i]) return; #endif /* Pull up the node. */ pull = *samep; *samep = JT(pull); JT(pull) = *diffp; /* * At the top of the chain, each predecessor needs to point at the * pulled up node. Inside the chain, there is only one predecessor * to worry about. */ if (at_top) { for (ep = b->in_edges; ep != 0; ep = ep->next) { if (JT(ep->pred) == b) JT(ep->pred) = pull; else JF(ep->pred) = pull; } } else *diffp = pull; done = 0; }
bool ScriptMethodsServerUINamespace::install() { const JNINativeMethod NATIVES[] = { #define JF(a,b,c) {a,b,(void*)(ScriptMethodsServerUINamespace::c)} JF("_createSUIPage", "(Ljava/lang/String;JJ)I", createSuiPage), JF("clearSUIDataSource", "(ILjava/lang/String;)Z", clearSuiDataSource), JF("clearSUIDataSourceContainer", "(ILjava/lang/String;)Z", clearSuiDataSourceContainer), JF("addSUIDataItem", "(ILjava/lang/String;Ljava/lang/String;)Z", addSuiDataItem), JF("addSUIDataSourceContainer", "(ILjava/lang/String;Ljava/lang/String;)Z", addSuiDataSourceContainer), JF("addSUIDataSource", "(ILjava/lang/String;Ljava/lang/String;)Z", addSuiDataSource), JF("addSUIChildWidget", "(ILjava/lang/String;Ljava/lang/String;Ljava/lang/String;)Z", addSuiChildWidget), JF("setSUIProperty", "(ILjava/lang/String;Ljava/lang/String;Ljava/lang/String;)Z", setSuiProperty), JF("subscribeToSUIEvent", "(IILjava/lang/String;Ljava/lang/String;)Z", subscribeToSuiEvent), JF("subscribeToSUIPropertyForEvent", "(IILjava/lang/String;Ljava/lang/String;Ljava/lang/String;)Z", subscribeToSuiPropertyForEvent), JF("showSUIPage", "(I)Z", showSuiPage), JF("forceCloseSUIPage", "(I)Z", forceCloseSuiPage), JF("_setSUIAssociatedObject", "(IJ)Z", setSuiAssociatedObject), JF("_setSUIAssociatedLocation", "(IJ)Z", setSuiAssociatedLocation), JF("setSUIMaxRangeToObject", "(IF)Z", setSuiMaxRangeToObject), JF("_clientMinigameOpen", "(JLscript/dictionary;)Z", clientMinigameOpen), JF("_clientMinigameClose", "(JLscript/dictionary;)Z", clientMinigameClose), }; return JavaLibrary::registerNatives(NATIVES, sizeof(NATIVES)/sizeof(NATIVES[0])); }