Example #1
int zend_build_ssa(zend_arena **arena, const zend_op_array *op_array, uint32_t build_flags, zend_ssa *ssa, uint32_t *func_flags) /* {{{ */
{
	zend_basic_block *blocks = ssa->cfg.blocks;
	zend_ssa_block *ssa_blocks;
	int blocks_count = ssa->cfg.blocks_count;
	uint32_t set_size;
	zend_bitset tmp, gen, in;
	int *var = NULL;
	int i, j, k, changed;
	zend_dfg dfg;
	ALLOCA_FLAG(dfg_use_heap);
	ALLOCA_FLAG(var_use_heap);

	ssa->rt_constants = (build_flags & ZEND_RT_CONSTANTS);
	ssa_blocks = zend_arena_calloc(arena, blocks_count, sizeof(zend_ssa_block));
	if (!ssa_blocks) {
		return FAILURE;
	}
	ssa->blocks = ssa_blocks;

	/* Compute Variable Liveness */
	dfg.vars = op_array->last_var + op_array->T;
	dfg.size = set_size = zend_bitset_len(dfg.vars);
	dfg.tmp = do_alloca((set_size * sizeof(zend_ulong)) * (blocks_count * 5 + 1), dfg_use_heap);
	memset(dfg.tmp, 0, (set_size * sizeof(zend_ulong)) * (blocks_count * 5 + 1));
	dfg.gen = dfg.tmp + set_size;
	dfg.def = dfg.gen + set_size * blocks_count;
	dfg.use = dfg.def + set_size * blocks_count;
	dfg.in  = dfg.use + set_size * blocks_count;
	dfg.out = dfg.in  + set_size * blocks_count;
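	/* All six regions live in the single allocation above: dfg.tmp is one
	 * scratch bitset of set_size words, and gen/def/use/in/out each hold
	 * one bitset of set_size words per basic block. */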

	if (zend_build_dfg(op_array, &ssa->cfg, &dfg, build_flags) != SUCCESS) {
		free_alloca(dfg.tmp, dfg_use_heap);
		return FAILURE;
	}

	if (build_flags & ZEND_SSA_DEBUG_LIVENESS) {
		zend_dump_dfg(op_array, &ssa->cfg, &dfg);
	}

	tmp = dfg.tmp;
	gen = dfg.gen;
	in  = dfg.in;

	/* SSA construction, Step 1: Propagate "gen" sets in merge points */
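	/* That is: for the entry block and every merge point j, accumulate into
	 * gen[j] the variables that are live into j (in[j]) and are generated in
	 * some block between idom(j) and j, iterating until a fixed point. */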
	do {
		changed = 0;
		for (j = 0; j < blocks_count; j++) {
			if ((blocks[j].flags & ZEND_BB_REACHABLE) == 0) {
				continue;
			}
			if (blocks[j].predecessors_count > 1 || j == 0) {
				zend_bitset_copy(tmp, gen + (j * set_size), set_size);
				for (k = 0; k < blocks[j].predecessors_count; k++) {
					i = ssa->cfg.predecessors[blocks[j].predecessor_offset + k];
					while (i != -1 && i != blocks[j].idom) {
						zend_bitset_union_with_intersection(tmp, tmp, gen + (i * set_size), in + (j * set_size), set_size);
						i = blocks[i].idom;
					}
				}
				if (!zend_bitset_equal(gen + (j * set_size), tmp, set_size)) {
					zend_bitset_copy(gen + (j * set_size), tmp, set_size);
					changed = 1;
				}
			}
		}
	} while (changed);

	/* SSA construction, Step 2: Phi placement based on Dominance Frontiers */
	var = do_alloca(sizeof(int) * (op_array->last_var + op_array->T), var_use_heap);
	if (!var) {
		free_alloca(dfg.tmp, dfg_use_heap);
		return FAILURE;
	}
	zend_bitset_clear(tmp, set_size);

	for (j = 0; j < blocks_count; j++) {
		if ((blocks[j].flags & ZEND_BB_REACHABLE) == 0) {
			continue;
		}
		if (blocks[j].predecessors_count > 1) {
			zend_bitset_clear(tmp, set_size);
			if (blocks[j].flags & ZEND_BB_IRREDUCIBLE_LOOP) {
				/* Prevent any values from flowing into irreducible loops by
				   replacing all incoming values with explicit phis.  The
				   register allocator depends on this property.  */
				zend_bitset_copy(tmp, in + (j * set_size), set_size);
			} else {
				for (k = 0; k < blocks[j].predecessors_count; k++) {
					i = ssa->cfg.predecessors[blocks[j].predecessor_offset + k];
					while (i != -1 && i != blocks[j].idom) {
						zend_bitset_union_with_intersection(tmp, tmp, gen + (i * set_size), in + (j * set_size), set_size);
						i = blocks[i].idom;
					}
				}
			}

			if (!zend_bitset_empty(tmp, set_size)) {
				i = op_array->last_var + op_array->T;
				while (i > 0) {
					i--;
					if (zend_bitset_in(tmp, i)) {
						zend_ssa_phi *phi = zend_arena_calloc(arena, 1,
							sizeof(zend_ssa_phi) +
							sizeof(int) * blocks[j].predecessors_count +
							sizeof(void*) * blocks[j].predecessors_count);

						if (!phi) {
							goto failure;
						}
						phi->sources = (int*)(((char*)phi) + sizeof(zend_ssa_phi));
						memset(phi->sources, 0xff, sizeof(int) * blocks[j].predecessors_count);
						phi->use_chains = (zend_ssa_phi**)(((char*)phi->sources) + sizeof(int) * ssa->cfg.blocks[j].predecessors_count);

						phi->pi = -1;
						phi->var = i;
						phi->ssa_var = -1;
						phi->next = ssa_blocks[j].phis;
						ssa_blocks[j].phis = phi;
					}
				}
			}
		}
	}

	place_essa_pis(arena, op_array, build_flags, ssa, &dfg);

	/* SSA construction, Step 2 (continued): additional Phi placement after Pi placement, based on Dominance Frontiers */
	for (j = 0; j < blocks_count; j++) {
		if ((blocks[j].flags & ZEND_BB_REACHABLE) == 0) {
			continue;
		}
		if (blocks[j].predecessors_count > 1) {
			zend_bitset_clear(tmp, set_size);
			if (blocks[j].flags & ZEND_BB_IRREDUCIBLE_LOOP) {
				/* Prevent any values from flowing into irreducible loops by
				   replacing all incoming values with explicit phis.  The
				   register allocator depends on this property.  */
				zend_bitset_copy(tmp, in + (j * set_size), set_size);
			} else {
				for (k = 0; k < blocks[j].predecessors_count; k++) {
					i = ssa->cfg.predecessors[blocks[j].predecessor_offset + k];
					while (i != -1 && i != blocks[j].idom) {
						zend_ssa_phi *p = ssa_blocks[i].phis;
						while (p) {
							if (p->pi >= 0) {
								if (zend_bitset_in(in + (j * set_size), p->var) &&
								    !zend_bitset_in(gen + (i * set_size), p->var)) {
									zend_bitset_incl(tmp, p->var);
								}
							} else {
								zend_bitset_excl(tmp, p->var);
							}
							p = p->next;
						}
						i = blocks[i].idom;
					}
				}
			}

			if (!zend_bitset_empty(tmp, set_size)) {
				i = op_array->last_var + op_array->T;
				while (i > 0) {
					i--;
					if (zend_bitset_in(tmp, i)) {
						zend_ssa_phi **pp = &ssa_blocks[j].phis;
						while (*pp) {
							if ((*pp)->pi <= 0 && (*pp)->var == i) {
								break;
							}
							pp = &(*pp)->next;
						}
						if (*pp == NULL) {
							zend_ssa_phi *phi = zend_arena_calloc(arena, 1,
								sizeof(zend_ssa_phi) +
								sizeof(int) * blocks[j].predecessors_count +
								sizeof(void*) * blocks[j].predecessors_count);

							if (!phi) {
								goto failure;
							}
							phi->sources = (int*)(((char*)phi) + sizeof(zend_ssa_phi));
							memset(phi->sources, 0xff, sizeof(int) * blocks[j].predecessors_count);
							phi->use_chains = (zend_ssa_phi**)(((char*)phi->sources) + sizeof(int) * ssa->cfg.blocks[j].predecessors_count);

							phi->pi = -1;
							phi->var = i;
							phi->ssa_var = -1;
							phi->next = NULL;
							*pp = phi;
						}
					}
				}
			}
		}
	}

	if (build_flags & ZEND_SSA_DEBUG_PHI_PLACEMENT) {
		zend_dump_phi_placement(op_array, ssa);
	}

	/* SSA construction, Step 3: Renaming */
	ssa->ops = zend_arena_calloc(arena, op_array->last, sizeof(zend_ssa_op));
	memset(ssa->ops, 0xff, op_array->last * sizeof(zend_ssa_op));
	memset(var, 0xff, (op_array->last_var + op_array->T) * sizeof(int));
	/* Create uninitialized SSA variables for each CV */
	for (j = 0; j < op_array->last_var; j++) {
		var[j] = j;
	}
	ssa->vars_count = op_array->last_var;
	if (zend_ssa_rename(op_array, build_flags, ssa, var, 0) != SUCCESS) {
failure:
		free_alloca(var, var_use_heap);
		free_alloca(dfg.tmp, dfg_use_heap);
		return FAILURE;
	}

	free_alloca(var, var_use_heap);
	free_alloca(dfg.tmp, dfg_use_heap);

	return SUCCESS;
}
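
The two phi allocation sites in the function above pack the phi header and its two per-predecessor arrays (sources and use_chains) into a single arena allocation. The following is a minimal, self-contained sketch of the same layout trick using malloc; the struct and names are illustrative stand-ins, not the real Zend definitions.

#include <stdlib.h>
#include <string.h>

/* Illustrative stand-in for zend_ssa_phi: a header followed by two
 * per-predecessor arrays carved out of the same allocation. */
typedef struct phi_node {
	struct phi_node  *next;
	int               var;        /* variable this phi merges             */
	int              *sources;    /* one incoming SSA var per predecessor */
	struct phi_node **use_chains; /* one use-chain head per predecessor   */
} phi_node;

/* Single allocation: header, then the int array, then the pointer array,
 * mirroring the zend_arena_calloc() calls above. */
static phi_node *phi_alloc(int predecessors)
{
	phi_node *phi = calloc(1, sizeof(phi_node)
			+ sizeof(int) * predecessors
			+ sizeof(phi_node*) * predecessors);

	if (!phi) {
		return NULL;
	}
	phi->sources = (int*)((char*)phi + sizeof(phi_node));
	memset(phi->sources, 0xff, sizeof(int) * predecessors); /* all -1 */
	phi->use_chains = (phi_node**)((char*)phi->sources
			+ sizeof(int) * predecessors);
	return phi;
}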
Example #2
int zend_build_dfg(const zend_op_array *op_array, const zend_cfg *cfg, zend_dfg *dfg) /* {{{ */
{
	int set_size;
	zend_basic_block *blocks = cfg->blocks;
	int blocks_count = cfg->blocks_count;
	zend_bitset tmp, gen, def, use, in, out;
	zend_op *opline;
	uint32_t k;
	int j;

	/* FIXME: can we use "gen" instead of "def" for flow analyzing? */
	set_size = dfg->size;
	tmp = dfg->tmp;
	gen = dfg->gen;
	def = dfg->def;
	use = dfg->use;
	in  = dfg->in;
	out = dfg->out;

	/* Collect "gen", "def" and "use" sets */
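	/* Per-block semantics: "def" records variables assigned in the block,
	 * "use" records upward-exposed uses (reads not preceded by a def in the
	 * same block), and "gen" marks variables whose value is created or
	 * modified here (consumed later by phi placement in zend_build_ssa). */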
	for (j = 0; j < blocks_count; j++) {
		if ((blocks[j].flags & ZEND_BB_REACHABLE) == 0) {
			continue;
		}
		for (k = blocks[j].start; k <= blocks[j].end; k++) {
			opline = op_array->opcodes + k;
			if (opline->opcode != ZEND_OP_DATA) {
				zend_op *next = opline + 1;
				if (k < blocks[j].end &&
					next->opcode == ZEND_OP_DATA) {
					if (next->op1_type & (IS_CV|IS_VAR|IS_TMP_VAR)) {
						if (!DFG_ISSET(def, set_size, j, EX_VAR_TO_NUM(next->op1.var))) {
							DFG_SET(use, set_size, j, EX_VAR_TO_NUM(next->op1.var));
						}
					}
					if (next->op2_type == IS_CV) {
						if (!DFG_ISSET(def, set_size, j, EX_VAR_TO_NUM(next->op2.var))) {
							DFG_SET(use, set_size, j, EX_VAR_TO_NUM(next->op2.var));
						}
					} else if (next->op2_type == IS_VAR ||
							   next->op2_type == IS_TMP_VAR) {
						/* The ZEND_ASSIGN_* opcodes below use the second
						   operand of the following OP_DATA instruction as
						   a temporary variable, so it is not a real use */
						switch (opline->opcode) {
							case ZEND_ASSIGN_DIM:
							case ZEND_ASSIGN_OBJ:
							case ZEND_ASSIGN_ADD:
							case ZEND_ASSIGN_SUB:
							case ZEND_ASSIGN_MUL:
							case ZEND_ASSIGN_DIV:
							case ZEND_ASSIGN_MOD:
							case ZEND_ASSIGN_SL:
							case ZEND_ASSIGN_SR:
							case ZEND_ASSIGN_CONCAT:
							case ZEND_ASSIGN_BW_OR:
							case ZEND_ASSIGN_BW_AND:
							case ZEND_ASSIGN_BW_XOR:
							case ZEND_ASSIGN_POW:
								break;
							default:
								if (!DFG_ISSET(def, set_size, j, EX_VAR_TO_NUM(next->op2.var))) {
									DFG_SET(use, set_size, j, EX_VAR_TO_NUM(next->op2.var));
								}
						}
					}
				}
				if (opline->op1_type == IS_CV) {
					switch (opline->opcode) {
					case ZEND_ASSIGN:
					case ZEND_ASSIGN_REF:
					case ZEND_BIND_GLOBAL:
					case ZEND_BIND_STATIC:
					case ZEND_SEND_VAR_EX:
					case ZEND_SEND_REF:
					case ZEND_SEND_VAR_NO_REF:
					case ZEND_FE_RESET_R:
					case ZEND_FE_RESET_RW:
					case ZEND_ADD_ARRAY_ELEMENT:
					case ZEND_INIT_ARRAY:
					case ZEND_BIND_LEXICAL:
						if (!DFG_ISSET(use, set_size, j, EX_VAR_TO_NUM(opline->op1.var))) {
							// FIXME: include into "use" too ...?
							DFG_SET(use, set_size, j, EX_VAR_TO_NUM(opline->op1.var));
							DFG_SET(def, set_size, j, EX_VAR_TO_NUM(opline->op1.var));
						}
						DFG_SET(gen, set_size, j, EX_VAR_TO_NUM(opline->op1.var));
						break;
					case ZEND_UNSET_VAR:
						ZEND_ASSERT(opline->extended_value & ZEND_QUICK_SET);
						/* break missing intentionally */
					case ZEND_ASSIGN_ADD:
					case ZEND_ASSIGN_SUB:
					case ZEND_ASSIGN_MUL:
					case ZEND_ASSIGN_DIV:
					case ZEND_ASSIGN_MOD:
					case ZEND_ASSIGN_SL:
					case ZEND_ASSIGN_SR:
					case ZEND_ASSIGN_CONCAT:
					case ZEND_ASSIGN_BW_OR:
					case ZEND_ASSIGN_BW_AND:
					case ZEND_ASSIGN_BW_XOR:
					case ZEND_ASSIGN_POW:
					case ZEND_PRE_INC:
					case ZEND_PRE_DEC:
					case ZEND_POST_INC:
					case ZEND_POST_DEC:
					case ZEND_ASSIGN_DIM:
					case ZEND_ASSIGN_OBJ:
					case ZEND_UNSET_DIM:
					case ZEND_UNSET_OBJ:
					case ZEND_FETCH_DIM_W:
					case ZEND_FETCH_DIM_RW:
					case ZEND_FETCH_DIM_FUNC_ARG:
					case ZEND_FETCH_DIM_UNSET:
					case ZEND_FETCH_OBJ_W:
					case ZEND_FETCH_OBJ_RW:
					case ZEND_FETCH_OBJ_FUNC_ARG:
					case ZEND_FETCH_OBJ_UNSET:
						DFG_SET(gen, set_size, j, EX_VAR_TO_NUM(opline->op1.var));
					default:
						if (!DFG_ISSET(def, set_size, j, EX_VAR_TO_NUM(opline->op1.var))) {
							DFG_SET(use, set_size, j, EX_VAR_TO_NUM(opline->op1.var));
						}
					}
				} else if (opline->op1_type == IS_VAR ||
						   opline->op1_type == IS_TMP_VAR) {
					if (!DFG_ISSET(def, set_size, j, EX_VAR_TO_NUM(opline->op1.var))) {
						DFG_SET(use, set_size, j, EX_VAR_TO_NUM(opline->op1.var));
					}
				}
				if (opline->op2_type == IS_CV) {
					switch (opline->opcode) {
						case ZEND_ASSIGN:
						case ZEND_ASSIGN_REF:
						case ZEND_FE_FETCH_R:
						case ZEND_FE_FETCH_RW:
							if (!DFG_ISSET(use, set_size, j, EX_VAR_TO_NUM(opline->op2.var))) {
								// FIXME: include into "use" too ...?
								DFG_SET(use, set_size, j, EX_VAR_TO_NUM(opline->op2.var));
								DFG_SET(def, set_size, j, EX_VAR_TO_NUM(opline->op2.var));
							}
							DFG_SET(gen, set_size, j, EX_VAR_TO_NUM(opline->op2.var));
							break;
						default:
							if (!DFG_ISSET(def, set_size, j, EX_VAR_TO_NUM(opline->op2.var))) {
								DFG_SET(use, set_size, j, EX_VAR_TO_NUM(opline->op2.var));
							}
							break;
					}
				} else if (opline->op2_type == IS_VAR ||
						   opline->op2_type == IS_TMP_VAR) {
					if (opline->opcode == ZEND_FE_FETCH_R || opline->opcode == ZEND_FE_FETCH_RW) {
						if (!DFG_ISSET(use, set_size, j, EX_VAR_TO_NUM(opline->op2.var))) {
							DFG_SET(def, set_size, j, EX_VAR_TO_NUM(opline->op2.var));
						}
						DFG_SET(gen, set_size, j, EX_VAR_TO_NUM(opline->op2.var));
					} else {
						if (!DFG_ISSET(def, set_size, j, EX_VAR_TO_NUM(opline->op2.var))) {
							DFG_SET(use, set_size, j, EX_VAR_TO_NUM(opline->op2.var));
						}
					}
				}
				if (opline->result_type == IS_CV) {
					if (!DFG_ISSET(use, set_size, j, EX_VAR_TO_NUM(opline->result.var))) {
						DFG_SET(def, set_size, j, EX_VAR_TO_NUM(opline->result.var));
					}
					DFG_SET(gen, set_size, j, EX_VAR_TO_NUM(opline->result.var));
				} else if (opline->result_type == IS_VAR ||
						   opline->result_type == IS_TMP_VAR) {
					if (!DFG_ISSET(use, set_size, j, EX_VAR_TO_NUM(opline->result.var))) {
						DFG_SET(def, set_size, j, EX_VAR_TO_NUM(opline->result.var));
					}
					DFG_SET(gen, set_size, j, EX_VAR_TO_NUM(opline->result.var));
				}
				if ((opline->opcode == ZEND_FE_FETCH_R || opline->opcode == ZEND_FE_FETCH_RW) && opline->result_type == IS_TMP_VAR) {
					if (!DFG_ISSET(use, set_size, j, EX_VAR_TO_NUM(next->result.var))) {
						DFG_SET(def, set_size, j, EX_VAR_TO_NUM(next->result.var));
					}
					DFG_SET(gen, set_size, j, EX_VAR_TO_NUM(next->result.var));
				}
			}
		}
	}

	/* Calculate "in" and "out" sets */
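	/* Classic backward liveness equations, solved with a worklist:
	 *   out[b] = union of in[s] over all successors s of b
	 *   in[b]  = use[b] | (out[b] & ~def[b])
	 */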
	{
		uint32_t worklist_len = zend_bitset_len(blocks_count);
		ALLOCA_FLAG(use_heap);
		zend_bitset worklist = ZEND_BITSET_ALLOCA(worklist_len, use_heap);
		memset(worklist, 0, worklist_len * ZEND_BITSET_ELM_SIZE);
		for (j = 0; j < blocks_count; j++) {
			zend_bitset_incl(worklist, j);
		}
		while (!zend_bitset_empty(worklist, worklist_len)) {
			/* We use the last block on the worklist, because predecessors tend to be located
			 * before the succeeding block, so this converges faster. */
			j = zend_bitset_last(worklist, worklist_len);
			zend_bitset_excl(worklist, j);

			if ((blocks[j].flags & ZEND_BB_REACHABLE) == 0) {
				continue;
			}
			if (blocks[j].successors[0] >= 0) {
				zend_bitset_copy(DFG_BITSET(out, set_size, j), DFG_BITSET(in, set_size, blocks[j].successors[0]), set_size);
				if (blocks[j].successors[1] >= 0) {
					zend_bitset_union(DFG_BITSET(out, set_size, j), DFG_BITSET(in, set_size, blocks[j].successors[1]), set_size);
				}
			} else {
				zend_bitset_clear(DFG_BITSET(out, set_size, j), set_size);
			}
			zend_bitset_union_with_difference(tmp, DFG_BITSET(use, set_size, j), DFG_BITSET(out, set_size, j), DFG_BITSET(def, set_size, j), set_size);
			if (!zend_bitset_equal(DFG_BITSET(in, set_size, j), tmp, set_size)) {
				zend_bitset_copy(DFG_BITSET(in, set_size, j), tmp, set_size);

				/* Add predecessors of changed block to worklist */
				{
					int *predecessors = &cfg->predecessors[blocks[j].predecessor_offset];
					for (k = 0; k < blocks[j].predecessors_count; k++) {
						zend_bitset_incl(worklist, predecessors[k]);
					}
				}
			}
		}

		free_alloca(worklist, use_heap);
	}

	return SUCCESS;
}
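
Both versions of zend_build_dfg end with the same worklist-driven fixed point for the liveness equations out[b] = union of in[succ(b)] and in[b] = use[b] | (out[b] & ~def[b]). Below is a compact, self-contained C sketch of that computation on a toy four-block CFG, using plain uint32_t masks instead of zend_bitset; every name in it is illustrative only.

#include <stdint.h>
#include <stdio.h>

#define NBLOCKS 4

/* Toy CFG: per-block successor lists plus def/use masks (bit i = variable i). */
static const int      succ[NBLOCKS][2] = { {1, 2}, {3, -1}, {3, -1}, {-1, -1} };
static const uint32_t def[NBLOCKS]     = { 0x1, 0x2, 0x0, 0x0 };
static const uint32_t use[NBLOCKS]     = { 0x0, 0x1, 0x1, 0x2 };

int main(void)
{
	uint32_t in[NBLOCKS] = {0}, out[NBLOCKS] = {0};
	int changed = 1;

	/* Iterate to a fixed point; the real code drives this with a bitset
	 * worklist and processes blocks from the last to the first, which
	 * converges faster because predecessors usually precede successors. */
	while (changed) {
		changed = 0;
		for (int b = NBLOCKS - 1; b >= 0; b--) {
			uint32_t o = 0;
			for (int k = 0; k < 2; k++) {
				if (succ[b][k] >= 0) {
					o |= in[succ[b][k]];             /* out[b] = U in[succ]       */
				}
			}
			uint32_t i = use[b] | (o & ~def[b]);     /* in[b] = use | (out \ def) */
			if (i != in[b] || o != out[b]) {
				in[b] = i;
				out[b] = o;
				changed = 1;
			}
		}
	}
	for (int b = 0; b < NBLOCKS; b++) {
		printf("block %d: in=%#x out=%#x\n", b, (unsigned)in[b], (unsigned)out[b]);
	}
	return 0;
}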
Example #3
int zend_build_dfg(const zend_op_array *op_array, const zend_cfg *cfg, zend_dfg *dfg, uint32_t build_flags) /* {{{ */
{
	int set_size;
	zend_basic_block *blocks = cfg->blocks;
	int blocks_count = cfg->blocks_count;
	zend_bitset tmp, def, use, in, out;
	int k;
	uint32_t var_num;
	int j;

	set_size = dfg->size;
	tmp = dfg->tmp;
	def = dfg->def;
	use = dfg->use;
	in  = dfg->in;
	out = dfg->out;

	/* Collect "def" and "use" sets */
	for (j = 0; j < blocks_count; j++) {
		zend_op *opline, *end;
		if ((blocks[j].flags & ZEND_BB_REACHABLE) == 0) {
			continue;
		}

		opline = op_array->opcodes + blocks[j].start;
		end = opline + blocks[j].len;
		for (; opline < end; opline++) {
			if (opline->opcode != ZEND_OP_DATA) {
				zend_op *next = opline + 1;
				if (next < end && next->opcode == ZEND_OP_DATA) {
					if (next->op1_type & (IS_CV|IS_VAR|IS_TMP_VAR)) {
						var_num = EX_VAR_TO_NUM(next->op1.var);
						if (!DFG_ISSET(def, set_size, j, var_num)) {
							DFG_SET(use, set_size, j, var_num);
						}
					}
					if (next->op2_type & (IS_CV|IS_VAR|IS_TMP_VAR)) {
						var_num = EX_VAR_TO_NUM(next->op2.var);
						if (!DFG_ISSET(def, set_size, j, var_num)) {
							DFG_SET(use, set_size, j, var_num);
						}
					}
				}
				if (opline->op1_type == IS_CV) {
					var_num = EX_VAR_TO_NUM(opline->op1.var);
					switch (opline->opcode) {
					case ZEND_ADD_ARRAY_ELEMENT:
					case ZEND_INIT_ARRAY:
						if ((build_flags & ZEND_SSA_RC_INFERENCE)
								|| (opline->extended_value & ZEND_ARRAY_ELEMENT_REF)) {
							goto op1_def;
						}
						goto op1_use;
					case ZEND_FE_RESET_R:
					case ZEND_SEND_VAR:
					case ZEND_CAST:
					case ZEND_QM_ASSIGN:
					case ZEND_JMP_SET:
					case ZEND_COALESCE:
						if (build_flags & ZEND_SSA_RC_INFERENCE) {
							goto op1_def;
						}
						goto op1_use;
					case ZEND_YIELD:
						if ((build_flags & ZEND_SSA_RC_INFERENCE)
								|| (op_array->fn_flags & ZEND_ACC_RETURN_REFERENCE)) {
							goto op1_def;
						}
						goto op1_use;
					case ZEND_UNSET_VAR:
						ZEND_ASSERT(opline->extended_value & ZEND_QUICK_SET);
						/* break missing intentionally */
					case ZEND_ASSIGN:
					case ZEND_ASSIGN_REF:
					case ZEND_BIND_GLOBAL:
					case ZEND_BIND_STATIC:
					case ZEND_SEND_VAR_EX:
					case ZEND_SEND_REF:
					case ZEND_SEND_VAR_NO_REF:
					case ZEND_SEND_VAR_NO_REF_EX:
					case ZEND_FE_RESET_RW:
					case ZEND_ASSIGN_ADD:
					case ZEND_ASSIGN_SUB:
					case ZEND_ASSIGN_MUL:
					case ZEND_ASSIGN_DIV:
					case ZEND_ASSIGN_MOD:
					case ZEND_ASSIGN_SL:
					case ZEND_ASSIGN_SR:
					case ZEND_ASSIGN_CONCAT:
					case ZEND_ASSIGN_BW_OR:
					case ZEND_ASSIGN_BW_AND:
					case ZEND_ASSIGN_BW_XOR:
					case ZEND_ASSIGN_POW:
					case ZEND_PRE_INC:
					case ZEND_PRE_DEC:
					case ZEND_POST_INC:
					case ZEND_POST_DEC:
					case ZEND_ASSIGN_DIM:
					case ZEND_ASSIGN_OBJ:
					case ZEND_UNSET_DIM:
					case ZEND_UNSET_OBJ:
					case ZEND_FETCH_DIM_W:
					case ZEND_FETCH_DIM_RW:
					case ZEND_FETCH_DIM_FUNC_ARG:
					case ZEND_FETCH_DIM_UNSET:
					case ZEND_FETCH_OBJ_W:
					case ZEND_FETCH_OBJ_RW:
					case ZEND_FETCH_OBJ_FUNC_ARG:
					case ZEND_FETCH_OBJ_UNSET:
					case ZEND_VERIFY_RETURN_TYPE:
op1_def:
						/* A `def` always comes along with a dtor or separation,
						 * so the original value of the variable is effectively also `use`d */
						DFG_SET(use, set_size, j, var_num);
						DFG_SET(def, set_size, j, var_num);
						break;
					default:
op1_use:
						if (!DFG_ISSET(def, set_size, j, var_num)) {
							DFG_SET(use, set_size, j, var_num);
						}
					}
				} else if (opline->op1_type & (IS_VAR|IS_TMP_VAR)) {
					var_num = EX_VAR_TO_NUM(opline->op1.var);
					if (opline->opcode == ZEND_VERIFY_RETURN_TYPE) {
						DFG_SET(use, set_size, j, var_num);
						DFG_SET(def, set_size, j, var_num);
					} else if (!DFG_ISSET(def, set_size, j, var_num)) {
						DFG_SET(use, set_size, j, var_num);
					}
				}
				if (opline->op2_type == IS_CV) {
					var_num = EX_VAR_TO_NUM(opline->op2.var);
					switch (opline->opcode) {
						case ZEND_ASSIGN:
							if (build_flags & ZEND_SSA_RC_INFERENCE) {
								goto op2_def;
							}
							goto op2_use;
						case ZEND_BIND_LEXICAL:
							if ((build_flags & ZEND_SSA_RC_INFERENCE) || opline->extended_value) {
								goto op2_def;
							}
							goto op2_use;
						case ZEND_ASSIGN_REF:
						case ZEND_FE_FETCH_R:
						case ZEND_FE_FETCH_RW:
op2_def:
							// FIXME: include into "use" too ...?
							DFG_SET(use, set_size, j, var_num);
							DFG_SET(def, set_size, j, var_num);
							break;
						default:
op2_use:
							if (!DFG_ISSET(def, set_size, j, var_num)) {
								DFG_SET(use, set_size, j, var_num);
							}
							break;
					}
				} else if (opline->op2_type & (IS_VAR|IS_TMP_VAR)) {
					var_num = EX_VAR_TO_NUM(opline->op2.var);
					if (opline->opcode == ZEND_FE_FETCH_R || opline->opcode == ZEND_FE_FETCH_RW) {
						DFG_SET(def, set_size, j, var_num);
					} else {
						if (!DFG_ISSET(def, set_size, j, var_num)) {
							DFG_SET(use, set_size, j, var_num);
						}
					}
				}
				if (opline->result_type & (IS_CV|IS_VAR|IS_TMP_VAR)) {
					var_num = EX_VAR_TO_NUM(opline->result.var);
					if ((build_flags & ZEND_SSA_USE_CV_RESULTS)
					 && opline->result_type == IS_CV) {
						DFG_SET(use, set_size, j, var_num);
					}
					DFG_SET(def, set_size, j, var_num);
				}
			}
		}
	}

	/* Calculate "in" and "out" sets */
	{
		uint32_t worklist_len = zend_bitset_len(blocks_count);
		zend_bitset worklist;
		ALLOCA_FLAG(use_heap);
		worklist = ZEND_BITSET_ALLOCA(worklist_len, use_heap);
		memset(worklist, 0, worklist_len * ZEND_BITSET_ELM_SIZE);
		for (j = 0; j < blocks_count; j++) {
			zend_bitset_incl(worklist, j);
		}
		while (!zend_bitset_empty(worklist, worklist_len)) {
			/* We use the last block on the worklist, because predecessors tend to be located
			 * before the succeeding block, so this converges faster. */
			j = zend_bitset_last(worklist, worklist_len);
			zend_bitset_excl(worklist, j);

			if ((blocks[j].flags & ZEND_BB_REACHABLE) == 0) {
				continue;
			}
			if (blocks[j].successors_count != 0) {
				zend_bitset_copy(DFG_BITSET(out, set_size, j), DFG_BITSET(in, set_size, blocks[j].successors[0]), set_size);
				for (k = 1; k < blocks[j].successors_count; k++) {
					zend_bitset_union(DFG_BITSET(out, set_size, j), DFG_BITSET(in, set_size, blocks[j].successors[k]), set_size);
				}
			} else {
				zend_bitset_clear(DFG_BITSET(out, set_size, j), set_size);
			}
			zend_bitset_union_with_difference(tmp, DFG_BITSET(use, set_size, j), DFG_BITSET(out, set_size, j), DFG_BITSET(def, set_size, j), set_size);
			if (!zend_bitset_equal(DFG_BITSET(in, set_size, j), tmp, set_size)) {
				zend_bitset_copy(DFG_BITSET(in, set_size, j), tmp, set_size);

				/* Add predecessors of changed block to worklist */
				{
					int *predecessors = &cfg->predecessors[blocks[j].predecessor_offset];
					for (k = 0; k < blocks[j].predecessors_count; k++) {
						zend_bitset_incl(worklist, predecessors[k]);
					}
				}
			}
		}

		free_alloca(worklist, use_heap);
	}

	return SUCCESS;
}
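
The DFG_SET/DFG_ISSET/DFG_BITSET helpers used throughout both variants of zend_build_dfg are defined elsewhere in the optimizer headers and are not shown in this excerpt. Plausible definitions, consistent with how they are used above and with the zend_bitset helpers the excerpt already relies on, might look like the following; treat this as an assumption, not the verbatim source.

/* Plausible sketch: index into the per-block region of a DFG bitset array,
 * then delegate to zend_bitset_incl()/zend_bitset_in(); assumed, not copied
 * from the real header. */
#define DFG_BITSET(set, set_size, block_num) \
	((set) + (block_num) * (set_size))

#define DFG_SET(set, set_size, block_num, var_num) \
	zend_bitset_incl(DFG_BITSET(set, set_size, block_num), (var_num))

#define DFG_ISSET(set, set_size, block_num, var_num) \
	zend_bitset_in(DFG_BITSET(set, set_size, block_num), (var_num))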
Example #4
void zend_optimize_temporary_variables(zend_op_array *op_array, zend_optimizer_ctx *ctx)
{
	int T = op_array->T;
	int offset = op_array->last_var;
	uint32_t bitset_len;
	zend_bitset taken_T;	/* T index in use */
	zend_op **start_of_T;	/* opline where T is first used */
	zend_bitset valid_T;	/* Is the map_T valid */
	int *map_T;				/* Map's the T to its new index */
	zend_op *opline, *end;
	int currT;
	int i;
	int max = -1;
	int var_to_free = -1;
	void *checkpoint = zend_arena_checkpoint(ctx->arena);

	bitset_len = zend_bitset_len(T);
	taken_T = (zend_bitset) zend_arena_alloc(&ctx->arena, bitset_len * ZEND_BITSET_ELM_SIZE);
	start_of_T = (zend_op **) zend_arena_alloc(&ctx->arena, T * sizeof(zend_op *));
	valid_T = (zend_bitset) zend_arena_alloc(&ctx->arena, bitset_len * ZEND_BITSET_ELM_SIZE);
	map_T = (int *) zend_arena_alloc(&ctx->arena, T * sizeof(int));

	end = op_array->opcodes;
	opline = &op_array->opcodes[op_array->last - 1];

	/* Find T definition points */
	while (opline >= end) {
		if (ZEND_RESULT_TYPE(opline) & (IS_VAR | IS_TMP_VAR)) {
			start_of_T[VAR_NUM(ZEND_RESULT(opline).var) - offset] = opline;
		}
		opline--;
	}

	zend_bitset_clear(valid_T, bitset_len);
	zend_bitset_clear(taken_T, bitset_len);

	end = op_array->opcodes;
	opline = &op_array->opcodes[op_array->last - 1];

	while (opline >= end) {
		if ((ZEND_OP1_TYPE(opline) & (IS_VAR | IS_TMP_VAR))) {
			currT = VAR_NUM(ZEND_OP1(opline).var) - offset;
			if (opline->opcode == ZEND_ROPE_END) {
				int num = (((opline->extended_value + 1) * sizeof(zend_string*)) + (sizeof(zval) - 1)) / sizeof(zval);
				int var;

				var = max;
				while (var >= 0 && !zend_bitset_in(taken_T, var)) {
					var--;
				}
				max = MAX(max, var + num);
				var = var + 1;
				map_T[currT] = var;
				zend_bitset_incl(valid_T, currT);
				zend_bitset_incl(taken_T, var);
				ZEND_OP1(opline).var = NUM_VAR(var + offset);
				while (num > 1) {
					num--;
					zend_bitset_incl(taken_T, var + num);
				}
			} else {
				if (!zend_bitset_in(valid_T, currT)) {
					int use_new_var = 0;

					/* Code in "finally" blocks may modify temporary variables.
					 * We allocate new temporaries for values that need to
					 * survive FAST_CALLs.
					 */
					if ((op_array->fn_flags & ZEND_ACC_HAS_FINALLY_BLOCK) &&
					    (opline->opcode == ZEND_RETURN ||
					     opline->opcode == ZEND_GENERATOR_RETURN ||
					     opline->opcode == ZEND_RETURN_BY_REF ||
					     opline->opcode == ZEND_FREE ||
					     opline->opcode == ZEND_FE_FREE)) {
						zend_op *curr = opline;

						while (--curr >= end) {
							if (curr->opcode == ZEND_FAST_CALL) {
								use_new_var = 1;
								break;
							} else if (curr->opcode != ZEND_FREE &&
							           curr->opcode != ZEND_FE_FREE &&
							           curr->opcode != ZEND_VERIFY_RETURN_TYPE &&
							           curr->opcode != ZEND_DISCARD_EXCEPTION) {
								break;
							}
						}
					}
					if (use_new_var) {
						i = ++max;
						zend_bitset_incl(taken_T, i);
					} else {
						GET_AVAILABLE_T();
					}
					map_T[currT] = i;
					zend_bitset_incl(valid_T, currT);
				}
				ZEND_OP1(opline).var = NUM_VAR(map_T[currT] + offset);
			}
		}

		if ((ZEND_OP2_TYPE(opline) & (IS_VAR | IS_TMP_VAR))) {
			currT = VAR_NUM(ZEND_OP2(opline).var) - offset;
			if (!zend_bitset_in(valid_T, currT)) {
				GET_AVAILABLE_T();
				map_T[currT] = i;
				zend_bitset_incl(valid_T, currT);
			}
			ZEND_OP2(opline).var = NUM_VAR(map_T[currT] + offset);
		}

		if (ZEND_RESULT_TYPE(opline) & (IS_VAR | IS_TMP_VAR)) {
			currT = VAR_NUM(ZEND_RESULT(opline).var) - offset;
			if (zend_bitset_in(valid_T, currT)) {
				if (start_of_T[currT] == opline) {
					/* ZEND_FAST_CALL cannot share a temporary var with others,
					 * since the fast_var may also be set by ZEND_HANDLE_EXCEPTION,
					 * which may appear ahead of it */
					if (opline->opcode != ZEND_FAST_CALL) {
						zend_bitset_excl(taken_T, map_T[currT]);
					}
				}
				ZEND_RESULT(opline).var = NUM_VAR(map_T[currT] + offset);
				if (opline->opcode == ZEND_ROPE_INIT) {
					if (start_of_T[currT] == opline) {
						uint32_t num = ((opline->extended_value * sizeof(zend_string*)) + (sizeof(zval) - 1)) / sizeof(zval);
						while (num > 1) {
							num--;
							zend_bitset_excl(taken_T, map_T[currT]+num);
						}
					}
				}
			} else {
				/* Code which gets here is using a wrongly built opcode such as RECV() */
				GET_AVAILABLE_T();
				map_T[currT] = i;
				zend_bitset_incl(valid_T, currT);
				ZEND_RESULT(opline).var = NUM_VAR(i + offset);
			}
		}

		if (var_to_free >= 0) {
			zend_bitset_excl(taken_T, var_to_free);
			var_to_free = -1;
		}

		opline--;
	}

	if (op_array->live_range) {
		for (i = 0; i < op_array->last_live_range; i++) {
			op_array->live_range[i].var =
				NUM_VAR(map_T[VAR_NUM(op_array->live_range[i].var & ~ZEND_LIVE_MASK) - offset] + offset) |
				(op_array->live_range[i].var & ZEND_LIVE_MASK);
		}
	}

	zend_arena_release(&ctx->arena, checkpoint);
	op_array->T = max + 1;
}
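
GET_AVAILABLE_T() is a macro defined earlier in this file and not shown in this excerpt. Judging only from how it is used above, it must find the first temporary slot not present in taken_T, claim it in i, and raise max if needed. A hedged reading of that behaviour, written as a standalone helper with illustrative names:

/* Illustrative helper only: an assumption about what the GET_AVAILABLE_T()
 * macro does, inferred from its uses above, not its actual definition. */
static int get_available_t(zend_bitset taken_T, int T, int *max)
{
	int i;

	/* First temporary slot that is not currently taken. */
	for (i = 0; i < T; i++) {
		if (!zend_bitset_in(taken_T, i)) {
			break;
		}
	}
	zend_bitset_incl(taken_T, i);
	if (i > *max) {
		*max = i;
	}
	return i;
}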
Example #5
/* This pass removes all CVs and temporaries that are completely unused. It does *not* merge any CVs or TMPs.
 * This pass does not operate on SSA form anymore. */
void zend_optimizer_compact_vars(zend_op_array *op_array) {
	int i;

	ALLOCA_FLAG(use_heap1);
	ALLOCA_FLAG(use_heap2);
	uint32_t used_vars_len = zend_bitset_len(op_array->last_var + op_array->T);
	zend_bitset used_vars = ZEND_BITSET_ALLOCA(used_vars_len, use_heap1);
	uint32_t *vars_map = do_alloca((op_array->last_var + op_array->T) * sizeof(uint32_t), use_heap2);
	uint32_t num_cvs, num_tmps;

	/* Determine which CVs are used */
	zend_bitset_clear(used_vars, used_vars_len);
	for (i = 0; i < op_array->last; i++) {
		zend_op *opline = &op_array->opcodes[i];
		if (opline->op1_type & (IS_CV|IS_VAR|IS_TMP_VAR)) {
			zend_bitset_incl(used_vars, VAR_NUM(opline->op1.var));
		}
		if (opline->op2_type & (IS_CV|IS_VAR|IS_TMP_VAR)) {
			zend_bitset_incl(used_vars, VAR_NUM(opline->op2.var));
		}
		if (opline->result_type & (IS_CV|IS_VAR|IS_TMP_VAR)) {
			zend_bitset_incl(used_vars, VAR_NUM(opline->result.var));
			if (opline->opcode == ZEND_ROPE_INIT) {
				uint32_t num = ((opline->extended_value * sizeof(zend_string*)) + (sizeof(zval) - 1)) / sizeof(zval);
				while (num > 1) {
					num--;
					zend_bitset_incl(used_vars, VAR_NUM(opline->result.var) + num);
				}
			}
		}
	}

	num_cvs = 0;
	for (i = 0; i < op_array->last_var; i++) {
		if (zend_bitset_in(used_vars, i)) {
			vars_map[i] = num_cvs++;
		} else {
			vars_map[i] = (uint32_t) -1;
		}
	}

	num_tmps = 0;
	for (i = op_array->last_var; i < op_array->last_var + op_array->T; i++) {
		if (zend_bitset_in(used_vars, i)) {
			vars_map[i] = num_cvs + num_tmps++;
		} else {
			vars_map[i] = (uint32_t) -1;
		}
	}

	free_alloca(used_vars, use_heap1);
	if (num_cvs == op_array->last_var && num_tmps == op_array->T) {
		free_alloca(vars_map, use_heap2);
		return;
	}

	ZEND_ASSERT(num_cvs <= op_array->last_var);
	ZEND_ASSERT(num_tmps <= op_array->T);

	/* Update CV and TMP references in opcodes */
	for (i = 0; i < op_array->last; i++) {
		zend_op *opline = &op_array->opcodes[i];
		if (opline->op1_type & (IS_CV|IS_VAR|IS_TMP_VAR)) {
			opline->op1.var = NUM_VAR(vars_map[VAR_NUM(opline->op1.var)]);
		}
		if (opline->op2_type & (IS_CV|IS_VAR|IS_TMP_VAR)) {
			opline->op2.var = NUM_VAR(vars_map[VAR_NUM(opline->op2.var)]);
		}
		if (opline->result_type & (IS_CV|IS_VAR|IS_TMP_VAR)) {
			opline->result.var = NUM_VAR(vars_map[VAR_NUM(opline->result.var)]);
		}
	}

	/* Update TMP references in live ranges */
	if (op_array->live_range) {
		for (i = 0; i < op_array->last_live_range; i++) {
			op_array->live_range[i].var =
				(op_array->live_range[i].var & ZEND_LIVE_MASK) |
				NUM_VAR(vars_map[VAR_NUM(op_array->live_range[i].var & ~ZEND_LIVE_MASK)]);
		}
	}

	/* Update CV name table */
	if (num_cvs != op_array->last_var) {
		zend_string **names = safe_emalloc(sizeof(zend_string *), num_cvs, 0);
		for (i = 0; i < op_array->last_var; i++) {
			if (vars_map[i] != (uint32_t) -1) {
				names[vars_map[i]] = op_array->vars[i];
			} else {
				zend_string_release(op_array->vars[i]);
			}
		}
		efree(op_array->vars);
		op_array->vars = names;
	}

	op_array->last_var = num_cvs;
	op_array->T = num_tmps;

	free_alloca(vars_map, use_heap2);
}
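
The heart of this pass is the dense renumbering held in vars_map: mark used slots, assign consecutive new indices, rewrite every reference, and compact the side tables. A toy, self-contained illustration of that pattern on plain arrays follows; all names in it are made up for the example.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Toy "op array": 6 variable slots, 4 operand references into them. */
	enum { NVARS = 6, NREFS = 4 };
	const int refs[NREFS] = { 0, 3, 3, 5 };             /* slots actually used   */
	int       used[NVARS] = { 0 };
	uint32_t  map[NVARS];
	uint32_t  n = 0;

	for (int r = 0; r < NREFS; r++) {
		used[refs[r]] = 1;                               /* mark used slots       */
	}
	for (int v = 0; v < NVARS; v++) {
		map[v] = used[v] ? n++ : (uint32_t) -1;          /* dense index or "dead" */
	}
	for (int r = 0; r < NREFS; r++) {
		printf("ref %d -> %u\n", refs[r], map[refs[r]]); /* rewrite references    */
	}
	printf("compacted %d slots down to %u\n", NVARS, n);
	return 0;
}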
Example #6
int zend_cfg_identify_loops(const zend_op_array *op_array, zend_cfg *cfg, uint32_t *flags) /* {{{ */
{
	int i, j, k;
	int depth;
	zend_basic_block *blocks = cfg->blocks;
	int *dj_spanning_tree;
	zend_worklist work;
	int flag = ZEND_FUNC_NO_LOOPS;
	ALLOCA_FLAG(list_use_heap);
	ALLOCA_FLAG(tree_use_heap);

	ZEND_WORKLIST_ALLOCA(&work, cfg->blocks_count, list_use_heap);
	dj_spanning_tree = do_alloca(sizeof(int) * cfg->blocks_count, tree_use_heap);

	for (i = 0; i < cfg->blocks_count; i++) {
		dj_spanning_tree[i] = -1;
	}
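	/* Depth-first walk over the DJ graph (dominator-tree edges plus join
	 * edges), recording each visited block's parent in dj_spanning_tree[];
	 * this spanning tree is what the sp-back-edge test below walks. */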
	zend_worklist_push(&work, 0);
	while (zend_worklist_len(&work)) {
	next:
		i = zend_worklist_peek(&work);
		/* Visit blocks immediately dominated by i. */
		for (j = blocks[i].children; j >= 0; j = blocks[j].next_child) {
			if (zend_worklist_push(&work, j)) {
				dj_spanning_tree[j] = i;
				goto next;
			}
		}
		/* Visit join edges.  */
		for (j = 0; j < 2; j++) {
			int succ = blocks[i].successors[j];
			if (succ < 0) {
				continue;
			} else if (blocks[succ].idom == i) {
				continue;
			} else if (zend_worklist_push(&work, succ)) {
				dj_spanning_tree[succ] = i;
				goto next;
			}
		}
		zend_worklist_pop(&work);
	}

	/* Identify loops.  See Sreedhar et al, "Identifying Loops Using DJ
	   Graphs".  */

	for (i = 0, depth = 0; i < cfg->blocks_count; i++) {
		if (blocks[i].level > depth) {
			depth = blocks[i].level;
		}
	}
	for (; depth >= 0; depth--) {
		for (i = 0; i < cfg->blocks_count; i++) {
			if (blocks[i].level != depth) {
				continue;
			}
			zend_bitset_clear(work.visited, zend_bitset_len(cfg->blocks_count));
			for (j = 0; j < blocks[i].predecessors_count; j++) {
				int pred = cfg->predecessors[blocks[i].predecessor_offset + j];

				/* A join edge is one for which the predecessor does not
				   immediately dominate the successor.  */
				if (blocks[i].idom == pred) {
					continue;
				}

				/* In a loop back-edge (back-join edge), the successor dominates
				   the predecessor.  */
				if (dominates(blocks, i, pred)) {
					blocks[i].flags |= ZEND_BB_LOOP_HEADER;
					flag &= ~ZEND_FUNC_NO_LOOPS;
					zend_worklist_push(&work, pred);
				} else {
					/* Otherwise it's a cross-join edge.  See if it's a branch
					   to an ancestor on the dominator spanning tree.  */
					int dj_parent = pred;
					while (dj_parent >= 0) {
						if (dj_parent == i) {
							/* An sp-back edge: mark as irreducible.  */
							blocks[i].flags |= ZEND_BB_IRREDUCIBLE_LOOP;
							flag |= ZEND_FUNC_IRREDUCIBLE;
							flag &= ~ZEND_FUNC_NO_LOOPS;
							break;
						} else {
							dj_parent = dj_spanning_tree[dj_parent];
						}
					}
				}
			}
			while (zend_worklist_len(&work)) {
				j = zend_worklist_pop(&work);
				if (blocks[j].loop_header < 0 && j != i) {
					blocks[j].loop_header = i;
					for (k = 0; k < blocks[j].predecessors_count; k++) {
						zend_worklist_push(&work, cfg->predecessors[blocks[j].predecessor_offset + k]);
					}
				}
			}
		}
	}

	free_alloca(dj_spanning_tree, tree_use_heap);
	ZEND_WORKLIST_FREE_ALLOCA(&work, list_use_heap);
	*flags |= flag;

	return SUCCESS;
}
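
dominates() is defined elsewhere in this file. Since each block carries its immediate dominator (idom) and its depth in the dominator tree (level), a plausible implementation consistent with its use above is the standard walk-up-to-equal-level test; the sketch below rests on that assumption and is not necessarily the verbatim source.

/* Sketch, assuming blocks[].idom and blocks[].level are already filled in by
 * the dominator computation (as they must be before zend_cfg_identify_loops()
 * runs). */
static zend_bool dominates(const zend_basic_block *blocks, int a, int b)
{
	/* Walk b up the dominator tree until it is at a's depth; a dominates b
	 * exactly when that walk ends on a itself. */
	while (blocks[b].level > blocks[a].level) {
		b = blocks[b].idom;
	}
	return a == b;
}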