/* calculate required # of delay slots between the instruction that
 * assigns a value and the one that consumes
 */
int ir3_delayslots(struct ir3_instruction *assigner,
        struct ir3_instruction *consumer, unsigned n)
{
    if (ignore_dep(assigner, consumer, n))
        return 0;

    /* worst case is cat1-3 (alu) -> cat4/5 needing 6 cycles, normal
     * alu -> alu needs 3 cycles, cat4 -> alu and texture fetch
     * handled with sync bits
     */

    if (is_meta(assigner) || is_meta(consumer))
        return 0;

    if (writes_addr(assigner))
        return 6;

    /* handled via sync flags: */
    if (is_sfu(assigner) || is_tex(assigner) || is_mem(assigner))
        return 0;

    /* assigner must be alu: */
    if (is_flow(consumer) || is_sfu(consumer) || is_tex(consumer) ||
            is_mem(consumer)) {
        return 6;
    } else if ((is_mad(consumer->opc) || is_madsh(consumer->opc)) &&
            (n == 3)) {
        /* special case, 3rd src to cat3 not required on first cycle */
        return 1;
    } else {
        return 3;
    }
}
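/* Worked example, reading directly off the rules above: an alu producer
 * feeding a flow/sfu/tex/mem consumer needs 6 delay slots; alu feeding
 * alu needs 3; and an alu result consumed as the 3rd src of a mad/madsh
 * needs only 1, since cat3 does not read its 3rd src on the first cycle.
 * A producer that writes the address register always costs 6, while
 * sfu/tex/mem producers cost 0 because the (ss)/(sy) sync bits cover
 * that dependency instead.
 */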
pair<environment, expr> mk_aux_lemma(environment const & env, metavar_context const & mctx,
                                     local_context const & lctx,
                                     name const & c, expr const & type, expr const & value) {
    type_context ctx(env, options(), mctx, lctx, transparency_mode::All);
    bool is_lemma = true;
    optional<bool> is_meta(false);
    return mk_aux_definition_fn(ctx)(c, type, value, is_lemma, is_meta);
}
std::pair<level, justification> substitution::instantiate_metavars(level const & l, bool use_jst) {
    if (!has_meta(l))
        return mk_pair(l, justification());
    justification j;
    auto save_jst = [&](justification const & j2) { j = mk_composite1(j, j2); };
    level r = replace(l, [&](level const & l) {
            if (!has_meta(l)) {
                return some_level(l);
            } else if (is_meta(l)) {
                auto p1 = get_assignment(l);
                if (p1) {
                    auto p2 = instantiate_metavars(p1->first, use_jst);
                    if (use_jst) {
                        justification new_jst = mk_composite1(p1->second, p2.second);
                        assign(meta_id(l), p2.first, new_jst);
                        save_jst(new_jst);
                    } else {
                        assign(meta_id(l), p2.first);
                    }
                    return some_level(p2.first);
                }
            }
            return none_level();
        });
    return mk_pair(r, j);
}
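// Illustration of what the traversal above does: given the assignments
// ?u := max ?v 1 and ?v := zero, instantiate_metavars(?u) chases ?u to
// max ?v 1, recursively instantiates that to max zero 1, and then calls
// assign() so that ?u maps directly to the fully instantiated level --
// later lookups no longer have to walk the chain.  When use_jst is set,
// the justifications of every assignment used along the way are composed
// into the justification returned with the result.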
level collect(level const & l) {
    return replace(l, [&](level const & l) {
            if (is_meta(l)) {
                name const & id = meta_id(l);
                if (auto r = m_univ_meta_to_param.find(id)) {
                    return some_level(*r);
                } else {
                    name n = m_prefix.append_after(m_next_idx);
                    m_next_idx++;
                    level new_r = mk_param_univ(n);
                    m_univ_meta_to_param.insert(id, new_r);
                    m_univ_meta_to_param_inv.insert(n, l);
                    m_level_params.push_back(n);
                    return some_level(new_r);
                }
            } else if (is_param(l)) {
                name const & id = param_id(l);
                if (!m_found_univ_params.contains(id)) {
                    m_found_univ_params.insert(id);
                    m_level_params.push_back(id);
                }
            }
            return none_level();
        });
}
/* calculate required # of delay slots between the instruction that
 * assigns a value and the one that consumes
 */
int ir3_delayslots(struct ir3_instruction *assigner,
        struct ir3_instruction *consumer, unsigned n)
{
    /* worst case is cat1-3 (alu) -> cat4/5 needing 6 cycles, normal
     * alu -> alu needs 3 cycles, cat4 -> alu and texture fetch
     * handled with sync bits
     */

    if (is_meta(assigner))
        return 0;

    /* handled via sync flags: */
    if (is_sfu(assigner) || is_tex(assigner))
        return 0;

    /* assigner must be alu: */
    if (is_sfu(consumer) || is_tex(consumer)) {
        return 8;
    } else if ((consumer->category == 3) && is_mad(consumer->opc) &&
            (n == 2)) {
        /* special case, 3rd src to cat3 not required on first cycle */
        return 2;
    } else {
        return 5;
    }
}
static struct ir3_instruction *
instr_cp_fanin(struct ir3_instruction *instr)
{
    unsigned i;

    /* we need to handle fanin specially, to detect cases
     * when we need to keep a mov
     */

    for (i = 1; i < instr->regs_count; i++) {
        struct ir3_register *src = instr->regs[i];
        if (src->flags & IR3_REG_SSA) {
            struct ir3_instruction *cand =
                    instr_cp(src->instr, false);

            /* if the candidate is a fanout, then keep
             * the move.
             *
             * This is a bit, um, fragile, but it should
             * catch the extra mov's that the front-end
             * puts in for us already in these cases.
             */
            if (is_meta(cand) && (cand->opc == OPC_META_FO))
                cand = instr_cp(src->instr, true);

            src->instr = cand;
        }
    }

    walk_children(instr, false);

    return instr;
}
void print_procedure(FILE* dest, struct ast_node_t* node, int padding, int wrap) {
    ast_node_t* name = node->value.child;
    ast_node_t* params = name->next;
    ast_node_t* body = params->next;
    int new_pad = padding + 1;

    fprintf(dest, "(");
    fprintf(dest, "lambda");
    print_separator(dest, new_pad, 0);

    fprintf(dest, "(");
    ast_node_t* param = params->value.child;
    while (param) {
        if (!is_meta(param->type)) {
            print_single_node(dest, param->value.child, new_pad, 0);
            print_separator(dest, new_pad, 0);
        }
        param = param->next;
    }
    fprintf(dest, ")");

    print_separator(dest, new_pad, 1);
    print_single_node(dest, body, new_pad, 1);
    fprintf(dest, ")");
}
bool is_pi_meta(expr const & e) {
    if (is_pi(e)) {
        return is_pi_meta(binding_body(e));
    } else {
        return is_meta(e);
    }
}
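// For instance, is_pi_meta returns true for a telescope whose final body
// is a metavariable, such as Pi (x : A), Pi (y : B), ?m, and false once
// the innermost body is anything else (a constant, an application, ...).
// Non-Pi expressions fall straight through to is_meta, so a bare ?m is
// also accepted.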
static format pp_child(level const & l, bool unicode, unsigned indent) {
    if (is_explicit(l) || is_param(l) || is_meta(l) || is_global(l)) {
        return pp(l, unicode, indent);
    } else {
        return paren(pp(l, unicode, indent));
    }
}
static void print_child(std::ostream & out, level const & l) {
    if (is_explicit(l) || is_param(l) || is_meta(l) || is_global(l)) {
        print(out, l);
    } else {
        out << "(";
        print(out, l);
        out << ")";
    }
}
static unsigned distance(struct ir3_sched_ctx *ctx, struct ir3_instruction *instr,
        unsigned maxd)
{
    struct ir3_instruction *n = ctx->scheduled;
    unsigned d = 0;
    while (n && (n != instr) && (d < maxd)) {
        if (!is_meta(n))
            d++;
        n = n->next;
    }
    return d;
}
optional<expr> has_expr_metavar_strict(expr const & e) {
    if (!has_expr_metavar(e))
        return none_expr();
    optional<expr> r;
    for_each(e, [&](expr const & e, unsigned) {
            if (r || !has_expr_metavar(e)) return false;
            if (is_meta(e)) {
                r = e;
                return false;
            }
            if (is_local(e)) return false; // do not visit type
            return true;
        });
    return r;
}
static void
instr_find_neighbors(struct ir3_instruction *instr)
{
    struct ir3_instruction *src;

    if (ir3_instr_check_mark(instr))
        return;

    if (is_meta(instr) && (instr->opc == OPC_META_FI))
        group_n(&instr_ops, instr, instr->regs_count - 1);

    foreach_ssa_src(src, instr)
        instr_find_neighbors(src);
}
int print_variable_decl(FILE* dest, struct ast_node_t* node, int padding, int wrap) {
    ast_node_t* ident = node->value.child;
    ast_node_t* value = node->value.child->next;
    int new_pad = padding + 1;
    int is_global = (padding == 0);

    fprintf(dest, "(");
    if (is_global) {
        fprintf(dest, "define");
    } else {
        fprintf(dest, "let");
    }
    print_separator(dest, new_pad, 0);

    if (!is_global) {
        fprintf(dest, "(");
        fprintf(dest, "(");
    }

    print_single_node(dest, ident, new_pad, 0);
    print_separator(dest, new_pad, 0);

    if (value && !is_meta(value->type)) {
        print_single_node(dest, value, new_pad, 0);
    } else {
        fprintf(dest, "'undefined");
    }

    int completed;
    if (!is_global) {
        fprintf(dest, ")");
        fprintf(dest, ")");
        print_separator(dest, new_pad, 1);
        print_naked_list(dest, node->next, new_pad, wrap);
        completed = 1;
    } else {
        completed = 0;
    }

    fprintf(dest, ")");
    return completed;
}
int print_single_node(FILE* dest, struct ast_node_t* node, int padding, int wrap) {
    TOKEN_TYPE_T type = node->type;
    if (is_meta(type)) {
        // ignore
    } else if (is_atomic(type)) {
        print_atomic(dest, node, padding, wrap);
    } else {
        return print_compozite(dest, node, padding, wrap);
    }
    return 0;
}
void collect_locals(expr const & e, collected_locals & ls, bool restricted) {
    if (!has_local(e))
        return;
    expr_set visited;
    std::function<void(expr const & e)> visit = [&](expr const & e) {
        if (!has_local(e))
            return;
        if (restricted && is_meta(e))
            return;
        if (visited.find(e) != visited.end())
            return;
        visited.insert(e);
        switch (e.kind()) {
        case expr_kind::Var:
        case expr_kind::Constant:
        case expr_kind::Sort:
            break; // do nothing
        case expr_kind::Local:
            if (!restricted)
                visit(mlocal_type(e));
            ls.insert(e);
            break;
        case expr_kind::Meta:
            lean_assert(!restricted);
            visit(mlocal_type(e));
            break;
        case expr_kind::Macro:
            for (unsigned i = 0; i < macro_num_args(e); i++)
                visit(macro_arg(e, i));
            break;
        case expr_kind::App:
            visit(app_fn(e));
            visit(app_arg(e));
            break;
        case expr_kind::Lambda:
        case expr_kind::Pi:
            visit(binding_domain(e));
            visit(binding_body(e));
            break;
        case expr_kind::Let:
            visit(let_type(e));
            visit(let_value(e));
            visit(let_body(e));
            break;
        }
    };
    visit(e);
}
static void print_instr_name(struct ir3_instruction *instr)
{
#ifdef DEBUG
    printf("%04u:", instr->serialno);
#endif
    printf("%03u: ", instr->depth);

    if (instr->flags & IR3_INSTR_SY)
        printf("(sy)");
    if (instr->flags & IR3_INSTR_SS)
        printf("(ss)");

    if (is_meta(instr)) {
        switch (instr->opc) {
        case OPC_META_PHI:
            printf("Φ");
            break;
        default:
            /* shouldn't hit here.. just for debugging: */
            switch (instr->opc) {
            case OPC_META_INPUT: printf("_meta:in");  break;
            case OPC_META_FO:    printf("_meta:fo");  break;
            case OPC_META_FI:    printf("_meta:fi");  break;
            default:             printf("_meta:%d", instr->opc); break;
            }
            break;
        }
    } else if (instr->category == 1) {
        static const char *type[] = {
                [TYPE_F16] = "f16", [TYPE_F32] = "f32",
                [TYPE_U16] = "u16", [TYPE_U32] = "u32",
                [TYPE_S16] = "s16", [TYPE_S32] = "s32",
                [TYPE_U8]  = "u8",  [TYPE_S8]  = "s8",
        };
        if (instr->cat1.src_type == instr->cat1.dst_type)
            printf("mov");
        else
            printf("cov");
        printf(".%s%s", type[instr->cat1.src_type], type[instr->cat1.dst_type]);
    } else {
static struct ir3_instruction *
instr_cp(struct ir3_instruction *instr, bool keep)
{
    /* if we've already visited this instruction, bail now: */
    if (ir3_instr_check_mark(instr))
        return instr;

    if (is_meta(instr) && (instr->opc == OPC_META_FI))
        return instr_cp_fanin(instr);

    if (is_eligible_mov(instr) && !keep) {
        struct ir3_register *src = instr->regs[1];
        return instr_cp(src->instr, false);
    }

    walk_children(instr, false);

    return instr;
}
static void ir3_instr_depth(struct ir3_instruction *instr)
{
    unsigned i;

    /* if we've already visited this instruction, bail now: */
    if (ir3_instr_check_mark(instr))
        return;

    instr->depth = 0;

    for (i = 1; i < instr->regs_count; i++) {
        struct ir3_register *src = instr->regs[i];
        if (src->flags & IR3_REG_SSA) {
            unsigned sd;

            /* visit child to compute its depth: */
            ir3_instr_depth(src->instr);

            sd = ir3_delayslots(src->instr, instr, i-1) +
                    src->instr->depth;

            instr->depth = MAX2(instr->depth, sd);
        }
    }

    /* meta-instructions don't add cycles, other than PHI.. which
     * might translate to a real instruction..
     *
     * well, not entirely true, fan-in/out, etc might need to
     * generate some extra mov's in edge cases, etc.. probably
     * we might want to do depth calculation considering the worst
     * case for these??
     */
    if (!is_meta(instr))
        instr->depth++;

    insert_by_depth(instr);
}
static unsigned delay_calc2(struct ir3_sched_ctx *ctx,
        struct ir3_instruction *assigner,
        struct ir3_instruction *consumer, unsigned srcn)
{
    unsigned delay = 0;

    if (is_meta(assigner)) {
        unsigned i;
        for (i = 1; i < assigner->regs_count; i++) {
            struct ir3_register *reg = assigner->regs[i];
            if (reg->flags & IR3_REG_SSA) {
                unsigned d = delay_calc2(ctx, reg->instr,
                        consumer, srcn);
                delay = MAX2(delay, d);
            }
        }
    } else {
        delay = ir3_delayslots(assigner, consumer, srcn);
        delay -= distance(ctx, assigner, delay);
    }

    return delay;
}
name const & level_id(level const & l) {
    lean_assert(is_param(l) || is_global(l) || is_meta(l));
    return to_param_core(l).m_id;
}
/**
 * egg_accelerator_parse_virtual:
 * @accelerator: string representing an accelerator
 * @accelerator_key: return location for accelerator keyval
 * @accelerator_mods: return location for accelerator modifier mask
 *
 * Parses a string representing a virtual accelerator. The format
 * looks like "<Control>a" or "<Shift><Alt>F1" or
 * "<Release>z" (the last one is for key release). The parser
 * is fairly liberal and allows lower or upper case, and also
 * abbreviations such as "<Ctl>" and "<Ctrl>".
 *
 * If the parse fails, @accelerator_key and @accelerator_mods will
 * be set to 0 (zero) and %FALSE will be returned. If the string contains
 * only modifiers, @accelerator_key will be set to 0 but %TRUE will be
 * returned.
 *
 * The virtual vs. concrete accelerator distinction is a relic of
 * how the X Window System works; there are modifiers Mod2-Mod5 that
 * can represent various keyboard keys (numlock, meta, hyper, etc.),
 * the virtual modifier represents the keyboard key, the concrete
 * modifier the actual Mod2-Mod5 bits in the key press event.
 *
 * Returns: %TRUE on success.
 */
gboolean
egg_accelerator_parse_virtual (const gchar            *accelerator,
                               guint                  *accelerator_key,
                               EggVirtualModifierType *accelerator_mods)
{
  guint keyval;
  GdkModifierType mods;
  gint len;
  gboolean bad_keyval;

  if (accelerator_key)
    *accelerator_key = 0;

  if (accelerator_mods)
    *accelerator_mods = 0;

  g_return_val_if_fail (accelerator != NULL, FALSE);

  bad_keyval = FALSE;

  keyval = 0;
  mods = 0;
  len = strlen (accelerator);
  while (len)
    {
      if (*accelerator == '<')
        {
          if (len >= 9 && is_release (accelerator))
            {
              accelerator += 9;
              len -= 9;
              mods |= EGG_VIRTUAL_RELEASE_MASK;
            }
          else if (len >= 9 && is_control (accelerator))
            {
              accelerator += 9;
              len -= 9;
              mods |= EGG_VIRTUAL_CONTROL_MASK;
            }
          else if (len >= 7 && is_shift (accelerator))
            {
              accelerator += 7;
              len -= 7;
              mods |= EGG_VIRTUAL_SHIFT_MASK;
            }
          else if (len >= 6 && is_shft (accelerator))
            {
              accelerator += 6;
              len -= 6;
              mods |= EGG_VIRTUAL_SHIFT_MASK;
            }
          else if (len >= 6 && is_ctrl (accelerator))
            {
              accelerator += 6;
              len -= 6;
              mods |= EGG_VIRTUAL_CONTROL_MASK;
            }
          else if (len >= 6 && is_modx (accelerator))
            {
              static const guint mod_vals[] = {
                EGG_VIRTUAL_ALT_MASK, EGG_VIRTUAL_MOD2_MASK,
                EGG_VIRTUAL_MOD3_MASK, EGG_VIRTUAL_MOD4_MASK,
                EGG_VIRTUAL_MOD5_MASK
              };

              len -= 6;
              accelerator += 4;
              mods |= mod_vals[*accelerator - '1'];
              accelerator += 2;
            }
          else if (len >= 5 && is_ctl (accelerator))
            {
              accelerator += 5;
              len -= 5;
              mods |= EGG_VIRTUAL_CONTROL_MASK;
            }
          else if (len >= 5 && is_alt (accelerator))
            {
              accelerator += 5;
              len -= 5;
              mods |= EGG_VIRTUAL_ALT_MASK;
            }
          else if (len >= 6 && is_meta (accelerator))
            {
              accelerator += 6;
              len -= 6;
              mods |= EGG_VIRTUAL_META_MASK;
            }
          else if (len >= 7 && is_hyper (accelerator))
            {
              accelerator += 7;
              len -= 7;
              mods |= EGG_VIRTUAL_HYPER_MASK;
            }
          else if (len >= 7 && is_super (accelerator))
            {
              accelerator += 7;
              len -= 7;
              mods |= EGG_VIRTUAL_SUPER_MASK;
            }
          else if (len >= 9 && is_primary (accelerator))
            {
              accelerator += 9;
              len -= 9;
              mods |= EGG_VIRTUAL_CONTROL_MASK;
            }
          else
            {
              gchar last_ch;

              last_ch = *accelerator;
              while (last_ch && last_ch != '>')
                {
                  last_ch = *accelerator;
                  accelerator += 1;
                  len -= 1;
                }
            }
        }
      else
        {
          keyval = gdk_keyval_from_name (accelerator);

          if (keyval == 0)
            bad_keyval = TRUE;

          accelerator += len;
          len -= len;
        }
    }

  if (accelerator_key)
    *accelerator_key = gdk_keyval_to_lower (keyval);

  if (accelerator_mods)
    *accelerator_mods = mods;

  return !bad_keyval;
}
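/* A minimal usage sketch for the parser above -- an illustration only,
 * not part of the original file.  It assumes the egg accelerator header
 * and GTK/GDK are available, and simply prints what the parser
 * recognised.  Note that "<Shift>" alone still returns TRUE with a
 * keyval of 0, per the documentation comment above.
 */
static void
demo_parse_virtual (const gchar *str)
{
  guint keyval = 0;
  EggVirtualModifierType mods = 0;

  if (egg_accelerator_parse_virtual (str, &keyval, &mods))
    g_print ("\"%s\" -> keyval 0x%x, virtual mods 0x%x\n",
             str, keyval, (guint) mods);
  else
    g_print ("\"%s\" -> parse failed\n", str);
}

/* e.g. demo_parse_virtual ("<Control><Alt>F1");
 *      demo_parse_virtual ("<Shift>");
 */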
/** \brief Given a term <tt>a : a_type</tt>, and a metavariable \c m, creates a constraint
    that considers coercions from a_type to the type assigned to \c m. */
constraint mk_coercion_cnstr(type_checker & from_tc, type_checker & to_tc, coercion_info_manager & infom,
                             expr const & m, expr const & a, expr const & a_type,
                             justification const & j, unsigned delay_factor, bool lift_coe) {
    auto choice_fn = [=, &from_tc, &to_tc, &infom](expr const & meta, expr const & d_type, substitution const & s) {
        expr new_a_type;
        justification new_a_type_jst;
        if (is_meta(a_type)) {
            auto p = substitution(s).instantiate_metavars(a_type);
            new_a_type     = p.first;
            new_a_type_jst = p.second;
        } else {
            new_a_type     = a_type;
        }
        if (is_meta(new_a_type)) {
            if (delay_factor < to_delay_factor(cnstr_group::DelayedChoice)) {
                // postpone...
                return lazy_list<constraints>(constraints(mk_coercion_cnstr(from_tc, to_tc, infom, m, a, a_type,
                                                                            justification(), delay_factor+1, lift_coe)));
            } else {
                // giveup...
                return lazy_list<constraints>(constraints(mk_eq_cnstr(meta, a, justification())));
            }
        }
        constraint_seq cs;
        new_a_type = from_tc.whnf(new_a_type, cs);
        if ((lift_coe && is_pi_meta(d_type)) || (!lift_coe && is_meta(d_type))) {
            // case-split
            buffer<expr> locals;
            expr it_from = new_a_type;
            expr it_to   = d_type;
            while (is_pi(it_from) && is_pi(it_to)) {
                expr dom_from = binding_domain(it_from);
                expr dom_to   = binding_domain(it_to);
                if (!from_tc.is_def_eq(dom_from, dom_to, justification(), cs))
                    return lazy_list<constraints>();
                expr local = mk_local(mk_fresh_name(), binding_name(it_from), dom_from, binder_info());
                locals.push_back(local);
                it_from = instantiate(binding_body(it_from), local);
                it_to   = instantiate(binding_body(it_to), local);
            }
            buffer<expr> alts;
            get_coercions_from(from_tc.env(), it_from, alts);
            expr fn_a;
            if (!locals.empty())
                fn_a = mk_local(mk_fresh_name(), "f", new_a_type, binder_info());
            buffer<constraints> choices;
            buffer<expr> coes;
            // first alternative: no coercion
            constraint_seq cs1 = cs + mk_eq_cnstr(meta, a, justification());
            choices.push_back(cs1.to_list());
            unsigned i = alts.size();
            while (i > 0) {
                --i;
                expr coe = alts[i];
                if (!locals.empty())
                    coe = Fun(fn_a, Fun(locals, mk_app(coe, mk_app(fn_a, locals))));
                expr new_a = copy_tag(a, mk_app(coe, a));
                coes.push_back(coe);
                constraint_seq csi = cs + mk_eq_cnstr(meta, new_a, new_a_type_jst);
                choices.push_back(csi.to_list());
            }
            return choose(std::make_shared<coercion_elaborator>(infom, meta,
                                                                to_list(choices.begin(), choices.end()),
                                                                to_list(coes.begin(), coes.end())));
        } else {
            list<expr> coes = get_coercions_from_to(from_tc, to_tc, new_a_type, d_type, cs, lift_coe);
            if (is_nil(coes)) {
                expr new_a = a;
                infom.erase_coercion_info(a);
                cs += mk_eq_cnstr(meta, new_a, new_a_type_jst);
                return lazy_list<constraints>(cs.to_list());
            } else if (is_nil(tail(coes))) {
                expr new_a = copy_tag(a, mk_app(head(coes), a));
                infom.save_coercion_info(a, new_a);
                cs += mk_eq_cnstr(meta, new_a, new_a_type_jst);
                return lazy_list<constraints>(cs.to_list());
            } else {
                list<constraints> choices = map2<constraints>(coes, [&](expr const & coe) {
                        expr new_a   = copy_tag(a, mk_app(coe, a));
                        constraint c = mk_eq_cnstr(meta, new_a, new_a_type_jst);
                        return (cs + c).to_list();
                    });
                return choose(std::make_shared<coercion_elaborator>(infom, meta, choices, coes, false));
            }
        }
    };
    return mk_choice_cnstr(m, choice_fn, delay_factor, true, j);
}
int main() {
  unsigned char *remotehost, *remoteinfo, *remoteip, *remoteport;
  unsigned char query[256];
  unsigned char clean_query[256];
  unsigned char *qptr, *qptr2;
  int len, query_len;
  int fd, r = 0;
  struct cdb c;
  stralloc answer = {0};

  /* chroot() to $ROOT and switch to $UID:$GID */
  droproot("dffingerd: ");

  /* since we run under tcpserver, we can get all info about the
     remote side from the environment */
  remotehost = env_get("TCPREMOTEHOST");
  if (!remotehost) remotehost = "unknown";
  remoteinfo = env_get("TCPREMOTEINFO");
  if (!remoteinfo) remoteinfo = "-";
  remoteip = env_get("TCPREMOTEIP");
  if (!remoteip) remoteip = "unknown";
  remoteport = env_get("TCPREMOTEPORT");
  if (!remoteport) remoteport = "unknown";

  /* now: remotehost is the remote hostname or "unknown"
          remoteinfo is some ident string or "-"
          remoteip   is the remote IP address or "unknown" (?) */

  /* Read the request from the client and \0-terminate it */
  /* timeout after 60 seconds */
  query_len = timeoutread(60, stdin, query, sizeof(query) - 1);
  query[query_len] = '\0';

  /* Handle RFC 1288 stuff */
  qptr = query;
  if (*qptr == ' ') qptr++;
  if (*qptr == '/' && (*(qptr+1) == 'W' || *(qptr+1) == 'w') && *(qptr+2) == ' ')
    qptr += 3;

  /* \0-terminate query at the first \r or \n */
  for (len = 0; query[len]; len++) {
    if (query[len] == '\r' || query[len] == '\n') {
      query[len] = '\0';
      break;
    }
  }

  /* clean up the query string a bit by replacing chars which could
     clobber logging or so with _ -> extra paranoia */
  for (qptr2 = clean_query; *qptr; qptr++) {
    if (is_meta(*qptr)) {
      *qptr2++ = '_';
    } else {
      *qptr2++ = *qptr;
    }
  }
  *qptr2 = '\0';

  /* Do logging */
  buffer_puts(buffer_2, remotehost);
  buffer_puts(buffer_2, " [");
  buffer_puts(buffer_2, remoteip);
  buffer_puts(buffer_2, ":");
  buffer_puts(buffer_2, remoteport);
  buffer_puts(buffer_2, "] ");
  buffer_puts(buffer_2, remoteinfo);
  buffer_puts(buffer_2, " ");
  buffer_puts(buffer_2, clean_query);
  buffer_puts(buffer_2, "\n");
  buffer_flush(buffer_2);

  /* If there was any data we will go on */
  if (query_len > 0) {
    /* Open & init our cdb */
    fd = open_read("data.cdb");
    if (fd == -1) {
      /* If opening failed, quit */
      strerr_die2sys(111, FATAL, "can't open data.cdb");
    }
    cdb_init(&c, fd);

    /* Search for the queried "user" in the database */
    r = cdb_find(&c, clean_query, str_len(clean_query));
    if (r == 1) {
      /* read data */
      stralloc_ready(&answer, cdb_datalen(&c));
      if (cdb_read(&c, answer.s, cdb_datalen(&c), cdb_datapos(&c)) == -1) {
        strerr_die2sys(111, FATAL, "can't read from data.cdb");
      } else {
        answer.len = cdb_datalen(&c);
      }
    } else {
      /* We didn't find the requested user, try DEFAULTUSER */
      r = cdb_find(&c, DEFAULTUSER, str_len(DEFAULTUSER));
      if (r == 1) {
        /* read data */
        stralloc_ready(&answer, cdb_datalen(&c));
        if (cdb_read(&c, answer.s, cdb_datalen(&c), cdb_datapos(&c)) == -1) {
          strerr_die2sys(111, FATAL, "can't read from data.cdb");
        } else {
          answer.len = cdb_datalen(&c);
        }
      } else {
        /* no data for DEFAULTUSER either, so we don't have any data
           for the client */
        stralloc_copys(&answer, NOPE);
      }
    }

    /* write to the network with a 120s timeout */
    /* I guess the timeout isn't needed on a usual Unix */
    r = timeoutwrite(120, stdout, answer.s, answer.len);
    if (r <= 0) {
      strerr_die2sys(111, FATAL, "unable to write to network: ");
    }

    /* free database */
    cdb_free(&c);
    close(fd);
  } else {
    *clean_query = '\0';
  }

  return 0;
}
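/* Worked example of the request handling above (illustrative only): a
 * client sending "/W alice\r\n" -- the RFC 1288 verbose form -- has the
 * leading "/W " stripped and the line terminated at the first \r, so the
 * database lookup and the log line both see "alice".  Any characters that
 * is_meta() flags are replaced with '_' before they reach the log or the
 * cdb lookup.
 */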
bool is_idx_metauniv(level const & l) {
    if (!is_meta(l))
        return false;
    name const & n = meta_id(l);
    return !n.is_atomic() && n.is_numeral() && n.get_prefix() == *g_tmp_prefix;
}
bool is_param_core(level const & l) { return is_param(l) || is_global(l) || is_meta(l); }
/**
 * egg_accelerator_parse_virtual:
 * @accelerator: string representing an accelerator
 * @accelerator_key: return location for accelerator keyval
 * @accelerator_mods: return location for accelerator modifier mask
 *
 * Parses a string representing a virtual accelerator. The format
 * looks like "<Control>a" or "<Shift><Alt>F1" or
 * "<Release>z" (the last one is for key release). The parser
 * is fairly liberal and allows lower or upper case, and also
 * abbreviations such as "<Ctl>" and "<Ctrl>".
 *
 * If the parse fails, @accelerator_key and @accelerator_mods will
 * be set to 0 (zero) and %FALSE will be returned. If the string contains
 * only modifiers, @accelerator_key will be set to 0 but %TRUE will be
 * returned.
 *
 * The virtual vs. concrete accelerator distinction is a relic of
 * how the X Window System works; there are modifiers Mod2-Mod5 that
 * can represent various keyboard keys (numlock, meta, hyper, etc.),
 * the virtual modifier represents the keyboard key, the concrete
 * modifier the actual Mod2-Mod5 bits in the key press event.
 *
 * Returns: %TRUE on success.
 */
gboolean
egg_accelerator_parse_virtual (const gchar            *accelerator,
                               guint                  *accelerator_key,
                               guint                  *keycode,
                               EggVirtualModifierType *accelerator_mods)
{
  guint keyval;
  GdkModifierType mods;
  gint len;
  gboolean bad_keyval;

  if (accelerator_key)
    *accelerator_key = 0;

  if (accelerator_mods)
    *accelerator_mods = 0;

  if (keycode)
    *keycode = 0;

  g_return_val_if_fail (accelerator != NULL, FALSE);

  bad_keyval = FALSE;

  keyval = 0;
  mods = 0;
  len = strlen (accelerator);
  while (len)
    {
      if (*accelerator == '<')
        {
          if (len >= 9 && is_release (accelerator))
            {
              accelerator += 9;
              len -= 9;
              mods |= EGG_VIRTUAL_RELEASE_MASK;
            }
          else if (len >= 9 && is_control (accelerator))
            {
              accelerator += 9;
              len -= 9;
              mods |= EGG_VIRTUAL_CONTROL_MASK;
            }
          else if (len >= 7 && is_shift (accelerator))
            {
              accelerator += 7;
              len -= 7;
              mods |= EGG_VIRTUAL_SHIFT_MASK;
            }
          else if (len >= 6 && is_shft (accelerator))
            {
              accelerator += 6;
              len -= 6;
              mods |= EGG_VIRTUAL_SHIFT_MASK;
            }
          else if (len >= 6 && is_ctrl (accelerator))
            {
              accelerator += 6;
              len -= 6;
              mods |= EGG_VIRTUAL_CONTROL_MASK;
            }
          else if (len >= 6 && is_modx (accelerator))
            {
              static const guint mod_vals[] = {
                EGG_VIRTUAL_ALT_MASK, EGG_VIRTUAL_MOD2_MASK,
                EGG_VIRTUAL_MOD3_MASK, EGG_VIRTUAL_MOD4_MASK,
                EGG_VIRTUAL_MOD5_MASK
              };

              len -= 6;
              accelerator += 4;
              mods |= mod_vals[*accelerator - '1'];
              accelerator += 2;
            }
          else if (len >= 5 && is_ctl (accelerator))
            {
              accelerator += 5;
              len -= 5;
              mods |= EGG_VIRTUAL_CONTROL_MASK;
            }
          else if (len >= 5 && is_alt (accelerator))
            {
              accelerator += 5;
              len -= 5;
              mods |= EGG_VIRTUAL_ALT_MASK;
            }
          else if (len >= 6 && is_meta (accelerator))
            {
              accelerator += 6;
              len -= 6;
              mods |= EGG_VIRTUAL_META_MASK;
            }
          else if (len >= 7 && is_hyper (accelerator))
            {
              accelerator += 7;
              len -= 7;
              mods |= EGG_VIRTUAL_HYPER_MASK;
            }
          else if (len >= 7 && is_super (accelerator))
            {
              accelerator += 7;
              len -= 7;
              mods |= EGG_VIRTUAL_SUPER_MASK;
            }
          else
            {
              gchar last_ch;

              last_ch = *accelerator;
              while (last_ch && last_ch != '>')
                {
                  last_ch = *accelerator;
                  accelerator += 1;
                  len -= 1;
                }
            }
        }
      else
        {
          keyval = gdk_keyval_from_name (accelerator);

          if (keyval == 0)
            {
              /* If keyval is 0, then maybe it's a keycode.  Check for 0x## */
              if (len >= 4 && is_keycode (accelerator))
                {
                  char keystring[5];
                  gchar *endptr;
                  gint tmp_keycode;

                  memcpy (keystring, accelerator, 4);
                  keystring [4] = '\000';

                  tmp_keycode = strtol (keystring, &endptr, 16);

                  if (endptr == NULL || *endptr != '\000')
                    {
                      bad_keyval = TRUE;
                    }
                  else if (keycode != NULL)
                    {
                      *keycode = tmp_keycode;
                      /* 0x00 is an invalid keycode too. */
                      if (*keycode == 0)
                        bad_keyval = TRUE;
                    }
                }
            }
          else if (keycode != NULL)
            *keycode = XKeysymToKeycode (GDK_DISPLAY(), keyval);

          accelerator += len;
          len -= len;
        }
    }

  if (accelerator_key)
    *accelerator_key = gdk_keyval_to_lower (keyval);

  if (accelerator_mods)
    *accelerator_mods = mods;

  return !bad_keyval;
}
static gboolean
accelerator_parse (const gchar  *accelerator,
                   MetaKeyCombo *combo)
{
  guint keyval, keycode;
  MetaVirtualModifier mods;
  gint len;

  combo->keysym = 0;
  combo->keycode = 0;
  combo->modifiers = 0;

  if (accelerator == NULL)
    return FALSE;

  keyval = 0;
  keycode = 0;
  mods = 0;
  len = strlen (accelerator);
  while (len)
    {
      if (*accelerator == '<')
        {
          if (len >= 9 && is_primary (accelerator))
            {
              /* Primary is treated the same as Control */
              accelerator += 9;
              len -= 9;
              mods |= META_VIRTUAL_CONTROL_MASK;
            }
          else if (len >= 9 && is_control (accelerator))
            {
              accelerator += 9;
              len -= 9;
              mods |= META_VIRTUAL_CONTROL_MASK;
            }
          else if (len >= 7 && is_shift (accelerator))
            {
              accelerator += 7;
              len -= 7;
              mods |= META_VIRTUAL_SHIFT_MASK;
            }
          else if (len >= 6 && is_shft (accelerator))
            {
              accelerator += 6;
              len -= 6;
              mods |= META_VIRTUAL_SHIFT_MASK;
            }
          else if (len >= 6 && is_ctrl (accelerator))
            {
              accelerator += 6;
              len -= 6;
              mods |= META_VIRTUAL_CONTROL_MASK;
            }
          else if (len >= 6 && is_modx (accelerator))
            {
              static const guint mod_vals[] = {
                META_VIRTUAL_ALT_MASK, META_VIRTUAL_MOD2_MASK,
                META_VIRTUAL_MOD3_MASK, META_VIRTUAL_MOD4_MASK,
                META_VIRTUAL_MOD5_MASK,
              };

              len -= 6;
              accelerator += 4;
              mods |= mod_vals[*accelerator - '1'];
              accelerator += 2;
            }
          else if (len >= 5 && is_ctl (accelerator))
            {
              accelerator += 5;
              len -= 5;
              mods |= META_VIRTUAL_CONTROL_MASK;
            }
          else if (len >= 5 && is_alt (accelerator))
            {
              accelerator += 5;
              len -= 5;
              mods |= META_VIRTUAL_ALT_MASK;
            }
          else if (len >= 6 && is_meta (accelerator))
            {
              accelerator += 6;
              len -= 6;
              mods |= META_VIRTUAL_META_MASK;
            }
          else if (len >= 7 && is_hyper (accelerator))
            {
              accelerator += 7;
              len -= 7;
              mods |= META_VIRTUAL_HYPER_MASK;
            }
          else if (len >= 7 && is_super (accelerator))
            {
              accelerator += 7;
              len -= 7;
              mods |= META_VIRTUAL_SUPER_MASK;
            }
          else
            {
              gchar last_ch;

              last_ch = *accelerator;
              while (last_ch && last_ch != '>')
                {
                  last_ch = *accelerator;
                  accelerator += 1;
                  len -= 1;
                }
            }
        }
      else
        {
          if (len >= 4 && is_keycode (accelerator))
            {
              keycode = strtoul (accelerator, NULL, 16);
              goto out;
            }
          else if (strcmp (accelerator, "Above_Tab") == 0)
            {
              keyval = META_KEY_ABOVE_TAB;
              goto out;
            }
          else
            {
              keyval = xkb_keysym_from_name (accelerator, XKB_KEYSYM_CASE_INSENSITIVE);
              if (keyval == XKB_KEY_NoSymbol)
                {
                  char *with_xf86 = g_strconcat ("XF86", accelerator, NULL);
                  keyval = xkb_keysym_from_name (with_xf86, XKB_KEYSYM_CASE_INSENSITIVE);
                  g_free (with_xf86);

                  if (keyval == XKB_KEY_NoSymbol)
                    return FALSE;
                }
            }

          accelerator += len;
          len -= len;
        }
    }

out:
  combo->keysym = keyval;
  combo->keycode = keycode;
  combo->modifiers = mods;

  return TRUE;
}
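/* A small usage sketch (hypothetical; it assumes it lives in the same
 * translation unit as accelerator_parse above and uses only the
 * MetaKeyCombo fields the parser fills in): parse a binding string and
 * report the outcome.
 */
static void
demo_accelerator_parse (const gchar *binding)
{
  MetaKeyCombo combo;

  if (accelerator_parse (binding, &combo))
    g_print ("\"%s\" -> keysym 0x%x, keycode %u, mods 0x%x\n",
             binding, combo.keysym, combo.keycode,
             (guint) combo.modifiers);
  else
    g_print ("\"%s\" -> could not be parsed\n", binding);
}

/* e.g. demo_accelerator_parse ("<Super>Return");   keysym path
 *      demo_accelerator_parse ("0x7a");            raw keycode path
 */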
name const & meta_id(level const & l) {
    lean_assert(is_meta(l));
    return to_param_core(l).m_id;
}
/**
 * Handle cp for a given src register.  This additionally handles
 * the cases of collapsing immediate/const (which replace the src
 * register with a non-ssa src) or collapsing mov's from relative
 * src (which needs to also fixup the address src reference by the
 * instruction).
 */
static void
reg_cp(struct ir3_cp_ctx *ctx, struct ir3_instruction *instr,
        struct ir3_register *reg, unsigned n)
{
    struct ir3_instruction *src = ssa(reg);

    /* don't propagate copies into a PHI, since we don't know if the
     * src block executed:
     */
    if (instr->opc == OPC_META_PHI)
        return;

    if (is_eligible_mov(src, true)) {
        /* simple case, no immed/const/relativ, only mov's w/ ssa src: */
        struct ir3_register *src_reg = src->regs[1];
        unsigned new_flags = reg->flags;

        combine_flags(&new_flags, src);

        if (valid_flags(instr, n, new_flags)) {
            if (new_flags & IR3_REG_ARRAY) {
                debug_assert(!(reg->flags & IR3_REG_ARRAY));
                reg->array = src_reg->array;
            }
            reg->flags = new_flags;
            reg->instr = ssa(src_reg);
        }

        src = ssa(reg);      /* could be null for IR3_REG_ARRAY case */
        if (!src)
            return;
    } else if (is_same_type_mov(src) &&
            /* cannot collapse const/immed/etc into meta instrs: */
            !is_meta(instr)) {
        /* immed/const/etc cases, which require some special handling: */
        struct ir3_register *src_reg = src->regs[1];
        unsigned new_flags = reg->flags;

        combine_flags(&new_flags, src);

        if (!valid_flags(instr, n, new_flags)) {
            /* See if lowering an immediate to const would help. */
            if (valid_flags(instr, n, (new_flags & ~IR3_REG_IMMED) | IR3_REG_CONST)) {
                debug_assert(new_flags & IR3_REG_IMMED);
                instr->regs[n + 1] = lower_immed(ctx, src_reg, new_flags);
                return;
            }

            /* special case for "normal" mad instructions, we can
             * try swapping the first two args if that fits better.
             *
             * the "plain" MAD's (ie. the ones that don't shift first
             * src prior to multiply) can swap their first two srcs if
             * src[0] is !CONST and src[1] is CONST:
             */
            if ((n == 1) && is_mad(instr->opc) &&
                    !(instr->regs[0 + 1]->flags & (IR3_REG_CONST | IR3_REG_RELATIV)) &&
                    valid_flags(instr, 0, new_flags)) {
                /* swap src[0] and src[1]: */
                struct ir3_register *tmp;
                tmp = instr->regs[0 + 1];
                instr->regs[0 + 1] = instr->regs[1 + 1];
                instr->regs[1 + 1] = tmp;
                n = 0;
            } else {
                return;
            }
        }

        /* Here we handle the special case of mov from
         * CONST and/or RELATIV.  These need to be handled
         * specially, because in the case of move from CONST
         * there is no src ir3_instruction so we need to
         * replace the ir3_register.  And in the case of
         * RELATIV we need to handle the address register
         * dependency.
         */
        if (src_reg->flags & IR3_REG_CONST) {
            /* an instruction cannot reference two different
             * address registers:
             */
            if ((src_reg->flags & IR3_REG_RELATIV) &&
                    conflicts(instr->address, reg->instr->address))
                return;

            /* This seems to be a hw bug, or something where the timings
             * just somehow don't work out.  This restriction may only
             * apply if the first src is also CONST.
             */
            if ((opc_cat(instr->opc) == 3) && (n == 2) &&
                    (src_reg->flags & IR3_REG_RELATIV) &&
                    (src_reg->array.offset == 0))
                return;

            src_reg = ir3_reg_clone(instr->block->shader, src_reg);
            src_reg->flags = new_flags;
            instr->regs[n+1] = src_reg;

            if (src_reg->flags & IR3_REG_RELATIV)
                ir3_instr_set_address(instr, reg->instr->address);

            return;
        }

        if ((src_reg->flags & IR3_REG_RELATIV) &&
                !conflicts(instr->address, reg->instr->address)) {
            src_reg = ir3_reg_clone(instr->block->shader, src_reg);
            src_reg->flags = new_flags;
            instr->regs[n+1] = src_reg;
            ir3_instr_set_address(instr, reg->instr->address);

            return;
        }

        /* NOTE: seems we can only do immed integers, so don't
         * need to care about float.  But we do need to handle
         * abs/neg *before* checking that the immediate requires
         * few enough bits to encode:
         *
         * TODO: do we need to do something to avoid accidentally
         * catching a float immed?
         */
        if (src_reg->flags & IR3_REG_IMMED) {
            int32_t iim_val = src_reg->iim_val;

            debug_assert((opc_cat(instr->opc) == 1) ||
                    (opc_cat(instr->opc) == 6) ||
                    ir3_cat2_int(instr->opc));

            if (new_flags & IR3_REG_SABS)
                iim_val = abs(iim_val);

            if (new_flags & IR3_REG_SNEG)
                iim_val = -iim_val;

            if (new_flags & IR3_REG_BNOT)
                iim_val = ~iim_val;

            /* other than category 1 (mov) we can only encode up to 10 bits: */
            if ((instr->opc == OPC_MOV) ||
                    !((iim_val & ~0x3ff) && (-iim_val & ~0x3ff))) {
                new_flags &= ~(IR3_REG_SABS | IR3_REG_SNEG | IR3_REG_BNOT);
                src_reg = ir3_reg_clone(instr->block->shader, src_reg);
                src_reg->flags = new_flags;
                src_reg->iim_val = iim_val;
                instr->regs[n+1] = src_reg;
            } else if (valid_flags(instr, n, (new_flags & ~IR3_REG_IMMED) | IR3_REG_CONST)) {
                /* See if lowering an immediate to const would help. */
                instr->regs[n+1] = lower_immed(ctx, src_reg, new_flags);
            }

            return;
        }
    }
}