Example #1
/* Only for pre-ILK configs */
static void assert_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	if (cur_state != state)
		fprintf(stderr,
			"PLL state assertion failure (expected %s, current %s)\n",
			state_string(state), state_string(cur_state));
}
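
Every example in this listing calls a state_string() helper that is defined elsewhere in its code base. For the boolean on/off case used by assert_pll() above, a minimal sketch might look like the following; the name and signature come from the call site, the body is an assumption:

/* Hypothetical sketch: maps the bool checked by assert_pll() to a label. */
static const char *state_string(bool enabled)
{
	return enabled ? "on" : "off";
}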
Example #2
static int __acpi_bus_get_power(struct acpi_device *device, int *state)
{
	int result = ACPI_STATE_UNKNOWN;

	if (!device || !state)
		return -EINVAL;

	if (!device->flags.power_manageable) {
		/* TBD: Non-recursive algorithm for walking up hierarchy. */
		*state = device->parent ?
			device->parent->power.state : ACPI_STATE_D0;
		goto out;
	}

	/*
	 * Get the device's power state either directly (via _PSC) or
	 * indirectly (via power resources).
	 */
	if (device->power.flags.explicit_get) {
		unsigned long long psc;
		acpi_status status = acpi_evaluate_integer(device->handle,
							   "_PSC", NULL, &psc);
		if (ACPI_FAILURE(status))
			return -ENODEV;

		result = psc;
	}
	/* The test below covers ACPI_STATE_UNKNOWN too. */
	if (result <= ACPI_STATE_D2) {
		; /* Do nothing. */
	} else if (device->power.flags.power_resources) {
		int error = acpi_power_get_inferred_state(device, &result);
		if (error)
			return error;
	} else if (result == ACPI_STATE_D3_HOT) {
		result = ACPI_STATE_D3;
	}

	/*
	 * If we were unsure about the device parent's power state up to this
	 * point, the fact that the device is in D0 implies that the parent has
	 * to be in D0 too.
	 */
	if (device->parent && device->parent->power.state == ACPI_STATE_UNKNOWN
	    && result == ACPI_STATE_D0)
		device->parent->power.state = ACPI_STATE_D0;

	*state = result;

 out:
	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is %s\n",
			  device->pnp.bus_id, state_string(*state)));

	return 0;
}
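
In the ACPI examples (#2 and #18), state_string() renders a D-state. A plausible sketch, assuming only the standard ACPI_STATE_* constants that appear in these snippets; the exact strings are assumptions:

/* Hypothetical sketch of the ACPI variant: renders a D-state as text. */
static const char *state_string(int state)
{
	switch (state) {
	case ACPI_STATE_D0:
		return "D0";
	case ACPI_STATE_D1:
		return "D1";
	case ACPI_STATE_D2:
		return "D2";
	case ACPI_STATE_D3_HOT:
		return "D3hot";
	case ACPI_STATE_D3_COLD:
		return "D3cold";
	default:
		return "(unknown)";
	}
}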
Example #3
T *create(Args &&... args) {
    rassert(state == EMPTY, "state is %s", state_string());
    state = CONSTRUCTING;
    try {
        new (&object_data[0]) T(std::forward<Args>(args)...);
    } catch (...) {
        state = EMPTY;
        throw;
    }
    state = INSTANTIATED;
    return get();
}
Example #4
static int msm_otg_start_hnp(struct otg_transceiver *xceiv)
{
	struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg);
	enum usb_otg_state state;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	state = dev->otg.state;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (state != OTG_STATE_A_HOST) {
		pr_err("HNP can not be initiated in %s state\n",
				state_string(state));
		return -EINVAL;
	}

	pr_debug("A-Host: HNP initiated\n");
	clear_bit(A_BUS_REQ, &dev->inputs);
	wake_lock(&dev->wlock);
	queue_work(dev->wq, &dev->sm_work);
	return 0;
}
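
The OTG examples from here on (#5 through #8, #12, #13, and #16) all format enum usb_otg_state through state_string(). A minimal sketch covering only the states referenced in these snippets; the helper name comes from the call sites, and the exact strings are assumptions:

/* Hypothetical sketch: names the usb_otg_state values used in these examples. */
static const char *state_string(enum usb_otg_state state)
{
	switch (state) {
	case OTG_STATE_B_IDLE:		return "b_idle";
	case OTG_STATE_B_SRP_INIT:	return "b_srp_init";
	case OTG_STATE_B_PERIPHERAL:	return "b_peripheral";
	case OTG_STATE_B_WAIT_ACON:	return "b_wait_acon";
	case OTG_STATE_B_HOST:		return "b_host";
	case OTG_STATE_A_IDLE:		return "a_idle";
	case OTG_STATE_A_WAIT_VRISE:	return "a_wait_vrise";
	case OTG_STATE_A_WAIT_BCON:	return "a_wait_bcon";
	case OTG_STATE_A_HOST:		return "a_host";
	case OTG_STATE_A_SUSPEND:	return "a_suspend";
	case OTG_STATE_A_PERIPHERAL:	return "a_peripheral";
	case OTG_STATE_A_WAIT_VFALL:	return "a_wait_vfall";
	case OTG_STATE_A_VBUS_ERR:	return "a_vbus_err";
	default:			return "UNDEFINED";
	}
}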
Example #5
static int msm_otg_start_srp(struct otg_transceiver *xceiv)
{
	struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg);
	u32	val;
	int ret = 0;
	enum usb_otg_state state;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	state = dev->otg.state;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (state != OTG_STATE_B_IDLE) {
		pr_err("SRP can not be initiated in %s state\n",
				state_string(state));
		ret = -EINVAL;
		goto out;
	}

	if ((jiffies - dev->b_last_se0_sess) < msecs_to_jiffies(TB_SRP_INIT)) {
		pr_debug("initial conditions of SRP are not met. Try again "
				"after some time\n");
		ret = -EAGAIN;
		goto out;
	}

	/* Hardware auto-assist data pulsing: the data pulse is driven
	 * for 7 msec; wait for VBUS.
	 */
	val = readl(USB_OTGSC);
	writel((val & ~OTGSC_INTR_STS_MASK) | OTGSC_HADP, USB_OTGSC);

	/* VBUS pulsing is obsolete in the OTG 2.0 supplement */
out:
	return ret;
}
Example #6
/* Called when entering a state */
int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
{
	state_changed = 1;
	if (fsm->transceiver->state == new_state)
		return 0;

	VDBG("chg state to %s", state_string(new_state));

	otg_leave_state(fsm, fsm->transceiver->state);

	switch (new_state) {
	case OTG_STATE_B_IDLE:
		otg_drv_vbus(fsm, 0);
		otg_chrg_vbus(fsm, 0);
		otg_loc_conn(fsm, 0);
		otg_loc_sof(fsm, 0);
		otg_set_protocol(fsm, PROTO_UNDEF);
		otg_add_timer(fsm, b_se0_srp_tmr);
		break;
	case OTG_STATE_B_SRP_INIT:
		otg_start_pulse(fsm);
		otg_loc_sof(fsm, 0);
		otg_set_protocol(fsm, PROTO_UNDEF);
		otg_add_timer(fsm, b_srp_fail_tmr);
		break;
	case OTG_STATE_B_PERIPHERAL:
		otg_chrg_vbus(fsm, 0);
		otg_loc_conn(fsm, 1);
		otg_loc_sof(fsm, 0);
		otg_set_protocol(fsm, PROTO_GADGET);
		break;
	case OTG_STATE_B_WAIT_ACON:
		otg_chrg_vbus(fsm, 0);
		otg_loc_conn(fsm, 0);
		otg_loc_sof(fsm, 0);
		otg_set_protocol(fsm, PROTO_HOST);
		otg_add_timer(fsm, b_ase0_brst_tmr);
		fsm->a_bus_suspend = 0;
		break;
	case OTG_STATE_B_HOST:
		otg_chrg_vbus(fsm, 0);
		otg_loc_conn(fsm, 0);
		otg_loc_sof(fsm, 1);
		otg_set_protocol(fsm, PROTO_HOST);
		usb_bus_start_enum(fsm->transceiver->host,
				   fsm->transceiver->host->otg_port);
		break;
	case OTG_STATE_A_IDLE:
		otg_drv_vbus(fsm, 0);
		otg_chrg_vbus(fsm, 0);
		otg_loc_conn(fsm, 0);
		otg_loc_sof(fsm, 0);
		otg_set_protocol(fsm, PROTO_HOST);
		break;
	case OTG_STATE_A_WAIT_VRISE:
		otg_drv_vbus(fsm, 1);
		otg_loc_conn(fsm, 0);
		otg_loc_sof(fsm, 0);
		otg_set_protocol(fsm, PROTO_HOST);
		otg_add_timer(fsm, a_wait_vrise_tmr);
		break;
	case OTG_STATE_A_WAIT_BCON:
		otg_drv_vbus(fsm, 1);
		otg_loc_conn(fsm, 0);
		otg_loc_sof(fsm, 0);
		otg_set_protocol(fsm, PROTO_HOST);
		otg_add_timer(fsm, a_wait_bcon_tmr);
		break;
	case OTG_STATE_A_HOST:
		otg_drv_vbus(fsm, 1);
		otg_loc_conn(fsm, 0);
		otg_loc_sof(fsm, 1);
		otg_set_protocol(fsm, PROTO_HOST);
		/* When HNP is triggered while a_bus_req = 0, the A-host will
		 * suspend too fast to complete a_set_b_hnp_en.
		 */
		if (!fsm->a_bus_req || fsm->a_suspend_req)
			otg_add_timer(fsm, a_wait_enum_tmr);
		break;
	case OTG_STATE_A_SUSPEND:
		otg_drv_vbus(fsm, 1);
		otg_loc_conn(fsm, 0);
		otg_loc_sof(fsm, 0);
		otg_set_protocol(fsm, PROTO_HOST);
		otg_add_timer(fsm, a_aidl_bdis_tmr);
		break;
	case OTG_STATE_A_PERIPHERAL:
		otg_loc_conn(fsm, 1);
		otg_loc_sof(fsm, 0);
		otg_set_protocol(fsm, PROTO_GADGET);
		otg_drv_vbus(fsm, 1);
		break;
	case OTG_STATE_A_WAIT_VFALL:
		otg_drv_vbus(fsm, 0);
		otg_loc_conn(fsm, 0);
		otg_loc_sof(fsm, 0);
		otg_set_protocol(fsm, PROTO_HOST);
		break;
	case OTG_STATE_A_VBUS_ERR:
		otg_drv_vbus(fsm, 0);
		otg_loc_conn(fsm, 0);
		otg_loc_sof(fsm, 0);
		otg_set_protocol(fsm, PROTO_UNDEF);
		break;
	default:
		break;
	}

	fsm->transceiver->state = new_state;
	return 0;
}
Example #7
/* State change judgement */
int otg_statemachine(struct otg_fsm *fsm)
{
	enum usb_otg_state state;
	unsigned long flags;

	spin_lock_irqsave(&fsm->lock, flags);

	state = fsm->transceiver->state;
	state_changed = 0;
	/* State machine state change judgement */

	VDBG("top: curr state=%s", state_string(state));

	switch (state) {
	case OTG_STATE_UNDEFINED:
		VDBG("fsm->id = %d", fsm->id);
		if (fsm->id)
			otg_set_state(fsm, OTG_STATE_B_IDLE);
		else
			otg_set_state(fsm, OTG_STATE_A_IDLE);
		break;
	case OTG_STATE_B_IDLE:
		VDBG("gadget: %p", fsm->transceiver->gadget);
		if (!fsm->id)
			otg_set_state(fsm, OTG_STATE_A_IDLE);
		else if (fsm->b_sess_vld && fsm->transceiver->gadget)
			otg_set_state(fsm, OTG_STATE_B_PERIPHERAL);
		else if (fsm->b_bus_req && fsm->b_sess_end && fsm->b_se0_srp)
			otg_set_state(fsm, OTG_STATE_B_SRP_INIT);
		break;
	case OTG_STATE_B_SRP_INIT:
		if (!fsm->id || fsm->b_srp_done)
			otg_set_state(fsm, OTG_STATE_B_IDLE);
		break;
	case OTG_STATE_B_PERIPHERAL:
		if (!fsm->id || !fsm->b_sess_vld)
			otg_set_state(fsm, OTG_STATE_B_IDLE);
		else if (fsm->b_bus_req &&
			 fsm->transceiver->gadget->b_hnp_enable &&
			 fsm->a_bus_suspend)
			otg_set_state(fsm, OTG_STATE_B_WAIT_ACON);
		break;
	case OTG_STATE_B_WAIT_ACON:
		if (fsm->a_conn)
			otg_set_state(fsm, OTG_STATE_B_HOST);
		else if (!fsm->id || !fsm->b_sess_vld)
			otg_set_state(fsm, OTG_STATE_B_IDLE);
		else if (fsm->a_bus_resume || fsm->b_ase0_brst_tmout) {
			fsm->b_ase0_brst_tmout = 0;
			otg_set_state(fsm, OTG_STATE_B_PERIPHERAL);
		}
		break;
	case OTG_STATE_B_HOST:
		if (!fsm->id || !fsm->b_sess_vld)
			otg_set_state(fsm, OTG_STATE_B_IDLE);
		else if (!fsm->b_bus_req || !fsm->a_conn)
			otg_set_state(fsm, OTG_STATE_B_PERIPHERAL);
		break;
	case OTG_STATE_A_IDLE:
		if (fsm->id)
			otg_set_state(fsm, OTG_STATE_B_IDLE);
		else if (!fsm->a_bus_drop && (fsm->a_bus_req || fsm->a_srp_det))
			otg_set_state(fsm, OTG_STATE_A_WAIT_VRISE);
		break;
	case OTG_STATE_A_WAIT_VRISE:
		if (fsm->id || fsm->a_bus_drop || fsm->a_vbus_vld ||
		    fsm->a_wait_vrise_tmout) {
			otg_set_state(fsm, OTG_STATE_A_WAIT_BCON);
		}
		break;
	case OTG_STATE_A_WAIT_BCON:
		if (!fsm->a_vbus_vld)
			otg_set_state(fsm, OTG_STATE_A_VBUS_ERR);
		else if (fsm->b_conn)
			otg_set_state(fsm, OTG_STATE_A_HOST);
		else if (fsm->id || fsm->a_bus_drop || fsm->a_wait_bcon_tmout)
			otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL);
		break;
	case OTG_STATE_A_HOST:
		if ((!fsm->a_bus_req || fsm->a_suspend_req) &&
		    fsm->transceiver->host->b_hnp_enable)
			otg_set_state(fsm, OTG_STATE_A_SUSPEND);
		else if (fsm->id || !fsm->b_conn || fsm->a_bus_drop)
			otg_set_state(fsm, OTG_STATE_A_WAIT_BCON);
		else if (!fsm->a_vbus_vld)
			otg_set_state(fsm, OTG_STATE_A_VBUS_ERR);
		break;
	case OTG_STATE_A_SUSPEND:
		if (!fsm->b_conn && fsm->transceiver->host->b_hnp_enable)
			otg_set_state(fsm, OTG_STATE_A_PERIPHERAL);
		else if (!fsm->b_conn && !fsm->transceiver->host->b_hnp_enable)
			otg_set_state(fsm, OTG_STATE_A_WAIT_BCON);
		else if (fsm->a_bus_req || fsm->b_bus_resume)
			otg_set_state(fsm, OTG_STATE_A_HOST);
		else if (fsm->id || fsm->a_bus_drop || fsm->a_aidl_bdis_tmout)
			otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL);
		else if (!fsm->a_vbus_vld)
			otg_set_state(fsm, OTG_STATE_A_VBUS_ERR);
		break;
	case OTG_STATE_A_PERIPHERAL:
		if (fsm->id || fsm->a_bus_drop)
			otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL);
		else if (fsm->b_bus_suspend)
			otg_set_state(fsm, OTG_STATE_A_WAIT_BCON);
		else if (!fsm->a_vbus_vld)
			otg_set_state(fsm, OTG_STATE_A_VBUS_ERR);
		break;
	case OTG_STATE_A_WAIT_VFALL:
		if (fsm->id || fsm->a_bus_req ||
		    (!fsm->a_sess_vld && !fsm->b_conn))
			otg_set_state(fsm, OTG_STATE_A_IDLE);
		break;
	case OTG_STATE_A_VBUS_ERR:
		if (fsm->id || fsm->a_bus_drop || fsm->a_clr_err)
			otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL);
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(&fsm->lock, flags);

	return state_changed;
}
Example #8
static void isp_update_otg(struct isp1301 *isp, u8 stat)
{
	u8			isp_stat, isp_bstat;
	enum usb_otg_state	state = isp->otg.state;

	if (stat & INTR_BDIS_ACON)
		pr_debug("OTG:  BDIS_ACON, %s\n", state_name(isp));

	/* start certain state transitions right away */
	isp_stat = isp1301_get_u8(isp, ISP1301_INTERRUPT_SOURCE);
	if (isp_stat & INTR_ID_GND) {
		if (isp->otg.default_a) {
			switch (state) {
			case OTG_STATE_B_IDLE:
				a_idle(isp, "idle");
				/* FALLTHROUGH */
			case OTG_STATE_A_IDLE:
				enable_vbus_source(isp);
				/* FALLTHROUGH */
			case OTG_STATE_A_WAIT_VRISE:
				/* we skip over OTG_STATE_A_WAIT_BCON, since
				 * the HC will transition to A_HOST (or
				 * A_SUSPEND!) without our noticing except
				 * when HNP is used.
				 */
				if (isp_stat & INTR_VBUS_VLD)
					isp->otg.state = OTG_STATE_A_HOST;
				break;
			case OTG_STATE_A_WAIT_VFALL:
				if (!(isp_stat & INTR_SESS_VLD))
					a_idle(isp, "vfell");
				break;
			default:
				if (!(isp_stat & INTR_VBUS_VLD))
					isp->otg.state = OTG_STATE_A_VBUS_ERR;
				break;
			}
			isp_bstat = isp1301_get_u8(isp, ISP1301_OTG_STATUS);
		} else {
			switch (state) {
			case OTG_STATE_B_PERIPHERAL:
			case OTG_STATE_B_HOST:
			case OTG_STATE_B_WAIT_ACON:
				usb_gadget_vbus_disconnect(isp->otg.gadget);
				break;
			default:
				break;
			}
			if (state != OTG_STATE_A_IDLE)
				a_idle(isp, "id");
			if (isp->otg.host && state == OTG_STATE_A_IDLE)
				isp1301_defer_work(isp, WORK_HOST_RESUME);
			isp_bstat = 0;
		}
	} else {
		u32 l;

		/* if user unplugged mini-A end of cable,
		 * don't bypass A_WAIT_VFALL.
		 */
		if (isp->otg.default_a) {
			switch (state) {
			default:
				isp->otg.state = OTG_STATE_A_WAIT_VFALL;
				break;
			case OTG_STATE_A_WAIT_VFALL:
				state = OTG_STATE_A_IDLE;
				/* khubd may take a while to notice and
				 * handle this disconnect, so don't go
				 * to B_IDLE quite yet.
				 */
				break;
			case OTG_STATE_A_IDLE:
				host_suspend(isp);
				isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_1,
						MC1_BDIS_ACON_EN);
				isp->otg.state = OTG_STATE_B_IDLE;
				l = omap_readl(OTG_CTRL) & OTG_CTRL_MASK;
				l &= ~OTG_CTRL_BITS;
				omap_writel(l, OTG_CTRL);
				break;
			case OTG_STATE_B_IDLE:
				break;
			}
		}
		isp_bstat = isp1301_get_u8(isp, ISP1301_OTG_STATUS);

		switch (isp->otg.state) {
		case OTG_STATE_B_PERIPHERAL:
		case OTG_STATE_B_WAIT_ACON:
		case OTG_STATE_B_HOST:
			if (likely(isp_bstat & OTG_B_SESS_VLD))
				break;
			enable_vbus_draw(isp, 0);
#ifndef	CONFIG_USB_OTG
			/* UDC driver will clear OTG_BSESSVLD */
			isp1301_set_bits(isp, ISP1301_OTG_CONTROL_1,
						OTG1_DP_PULLDOWN);
			isp1301_clear_bits(isp, ISP1301_OTG_CONTROL_1,
						OTG1_DP_PULLUP);
			dump_regs(isp, __func__);
#endif
			/* FALLTHROUGH */
		case OTG_STATE_B_SRP_INIT:
			b_idle(isp, __func__);
			l = omap_readl(OTG_CTRL) & OTG_XCEIV_OUTPUTS;
			omap_writel(l, OTG_CTRL);
			/* FALLTHROUGH */
		case OTG_STATE_B_IDLE:
			if (isp->otg.gadget && (isp_bstat & OTG_B_SESS_VLD)) {
#ifdef	CONFIG_USB_OTG
				update_otg1(isp, isp_stat);
				update_otg2(isp, isp_bstat);
#endif
				b_peripheral(isp);
			} else if (!(isp_stat & (INTR_VBUS_VLD|INTR_SESS_VLD)))
				isp_bstat |= OTG_B_SESS_END;
			break;
		case OTG_STATE_A_WAIT_VFALL:
			break;
		default:
			pr_debug("otg: unsupported b-device %s\n",
				state_name(isp));
			break;
		}
	}

	if (state != isp->otg.state)
		pr_debug("  isp, %s -> %s\n",
				state_string(state), state_name(isp));

#ifdef	CONFIG_USB_OTG
	/* update the OTG controller state to match the isp1301; may
	 * trigger OPRT_CHG irqs for changes going to the isp1301.
	 */
	update_otg1(isp, isp_stat);
	update_otg2(isp, isp_bstat);
	check_state(isp, __func__);
#endif

	dump_regs(isp, "isp1301->otg");
}
Example #9
FuncAnalysis do_analyze_collect(const Index& index,
                                Context const inputCtx,
                                CollectedInfo& collect,
                                ClassAnalysis* clsAnalysis,
                                const std::vector<Type>* knownArgs) {
  auto const ctx = adjust_closure_context(inputCtx);
  FuncAnalysis ai(ctx);

  Trace::Bump bumper{Trace::hhbbc, kTraceFuncBump,
    is_trace_function(ctx.cls, ctx.func)};
  FTRACE(2, "{:-^70}\n-- {}\n", "Analyze", show(ctx));

  /*
   * Set of RPO ids that still need to be visited.
   *
   * Initially, we need each entry block in this list.  As we visit
   * blocks, we propagate states to their successors and across their
   * back edges---when state merges cause a change to the block
   * stateIn, we will add it to this queue so it gets visited again.
   */
  auto incompleteQ = prepare_incompleteQ(index, ai, clsAnalysis, knownArgs);

  /*
   * There are potentially infinitely growing types when we're using
   * union_of to merge states, so occasionally we need to apply a
   * widening operator.
   *
   * Currently this is done with a straightforward heuristic: if
   * you visit a block too many times, we'll start doing all the
   * merges with the widening operator until we've had a chance to
   * visit the block again.  We must then continue iterating in case
   * the actual fixed point is higher than the result of widening.
   *
   * Termination is guaranteed because the widening operator has only
   * finite chains in the type lattice.
   */
  auto nonWideVisits = std::vector<uint32_t>(ctx.func->nextBlockId);

  // For debugging, count how many times basic blocks get interpreted.
  auto interp_counter = uint32_t{0};

  /*
   * Iterate until a fixed point.
   *
   * Each time a stateIn for a block changes, we re-insert the block's
   * rpo ID in incompleteQ.  Since incompleteQ is ordered, we'll
   * always visit blocks with earlier RPO ids first, which hopefully
   * means fewer iterations.
   */
  while (!incompleteQ.empty()) {
    auto const blk = ai.rpoBlocks[incompleteQ.pop()];

    if (nonWideVisits[blk->id]++ > options.analyzeFuncWideningLimit) {
      nonWideVisits[blk->id] = 0;
    }

    FTRACE(2, "block #{}\nin {}{}", blk->id,
      state_string(*ctx.func, ai.bdata[blk->id].stateIn),
      property_state_string(collect.props));
    ++interp_counter;

    auto propagate = [&] (php::Block& target, const State& st) {
      auto const needsWiden =
        nonWideVisits[target.id] >= options.analyzeFuncWideningLimit;

      // We haven't optimized the widening operator much, because it
      // doesn't happen in practice right now.  We want to know when
      // it starts happening:
      if (needsWiden) {
        std::fprintf(stderr, "widening in %s on %s\n",
          ctx.unit->filename->data(),
          ctx.func->name->data());
      }

      FTRACE(2, "     {}-> {}\n", needsWiden ? "widening " : "", target.id);
      FTRACE(4, "target old {}",
        state_string(*ctx.func, ai.bdata[target.id].stateIn));

      auto const changed =
        needsWiden ? widen_into(ai.bdata[target.id].stateIn, st)
                   : merge_into(ai.bdata[target.id].stateIn, st);
      if (changed) {
        incompleteQ.push(rpoId(ai, &target));
      }
      FTRACE(4, "target new {}",
        state_string(*ctx.func, ai.bdata[target.id].stateIn));
    };

    auto stateOut = ai.bdata[blk->id].stateIn;
    auto interp   = Interp { index, ctx, collect, blk, stateOut };
    auto flags    = run(interp, propagate);
    if (flags.returned) {
      ai.inferredReturn = union_of(std::move(ai.inferredReturn),
                                   std::move(*flags.returned));
    }
  }

  ai.closureUseTypes = std::move(collect.closureUseTypes);

  if (ctx.func->isGenerator) {
    if (ctx.func->isAsync) {
      // Async generators always return AsyncGenerator object.
      ai.inferredReturn = objExact(index.builtin_class(s_AsyncGenerator.get()));
    } else {
      // Non-async generators always return Generator object.
      ai.inferredReturn = objExact(index.builtin_class(s_Generator.get()));
    }
  } else if (ctx.func->isAsync) {
    // Async functions always return WaitH<T>, where T is the type returned
    // internally.
    ai.inferredReturn = wait_handle(index, ai.inferredReturn);
  }

  /*
   * If inferredReturn is TBottom, the callee didn't execute a return
   * at all.  (E.g. it unconditionally throws, or is an abstract
   * function body.)
   *
   * In this case, we leave the return type as TBottom, to indicate
   * the same to callers.
   */
  assert(ai.inferredReturn.subtypeOf(TGen));

  // For debugging, print the final input states for each block.
  FTRACE(2, "{}", [&] {
    auto const bsep = std::string(60, '=') + "\n";
    auto const sep = std::string(60, '-') + "\n";
    auto ret = folly::format(
      "{}function {} ({} block interps):\n{}",
      bsep,
      show(ctx),
      interp_counter,
      bsep
    ).str();
    for (auto& bd : ai.bdata) {
      ret += folly::format(
        "{}block {}:\nin {}",
        sep,
        ai.rpoBlocks[bd.rpoId]->id,
        state_string(*ctx.func, bd.stateIn)
      ).str();
    }
    ret += sep + bsep;
    folly::format(&ret,
      "Inferred return type: {}\n", show(ai.inferredReturn));
    ret += bsep;
    return ret;
  }());

  return ai;
}
Example #10
static inline const char *state_name(struct isp1301 *isp)
{
	return state_string(isp->otg.state);
}
Example #11
static void check_state(struct isp1301 *isp, const char *tag)
{
	enum usb_otg_state	state = OTG_STATE_UNDEFINED;
	u8			fsm = omap_readw(OTG_TEST) & 0x0ff;
	unsigned		extra = 0;

	switch (fsm) {

	/* default-b */
	case 0x0:
		state = OTG_STATE_B_IDLE;
		break;
	case 0x3:
	case 0x7:
		extra = 1;
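		/* FALLTHROUGH */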
	case 0x1:
		state = OTG_STATE_B_PERIPHERAL;
		break;
	case 0x11:
		state = OTG_STATE_B_SRP_INIT;
		break;

	/* extra dual-role default-b states */
	case 0x12:
	case 0x13:
	case 0x16:
		extra = 1;
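		/* FALLTHROUGH */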
	case 0x17:
		state = OTG_STATE_B_WAIT_ACON;
		break;
	case 0x34:
		state = OTG_STATE_B_HOST;
		break;

	/* default-a */
	case 0x36:
		state = OTG_STATE_A_IDLE;
		break;
	case 0x3c:
		state = OTG_STATE_A_WAIT_VFALL;
		break;
	case 0x7d:
		state = OTG_STATE_A_VBUS_ERR;
		break;
	case 0x9e:
	case 0x9f:
		extra = 1;
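		/* FALLTHROUGH */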
	case 0x89:
		state = OTG_STATE_A_PERIPHERAL;
		break;
	case 0xb7:
		state = OTG_STATE_A_WAIT_VRISE;
		break;
	case 0xb8:
		state = OTG_STATE_A_WAIT_BCON;
		break;
	case 0xb9:
		state = OTG_STATE_A_HOST;
		break;
	case 0xba:
		state = OTG_STATE_A_SUSPEND;
		break;
	default:
		break;
	}
	if (isp->otg.state == state && !extra)
		return;
	pr_debug("otg: %s FSM %s/%02x, %s, %06x\n", tag,
		state_string(state), fsm, state_name(isp),
		omap_readl(OTG_CTRL));
}
Example #12
static int msm_otg_set_suspend(struct otg_transceiver *xceiv, int suspend)
{
	struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg);
	enum usb_otg_state state;
	unsigned long flags;

	if (!dev || (dev != the_msm_otg))
		return -ENODEV;

	spin_lock_irqsave(&dev->lock, flags);
	state = dev->otg.state;
	spin_unlock_irqrestore(&dev->lock, flags);

	pr_debug("suspend request in state: %s\n",
			state_string(state));

	if (suspend) {
		switch (state) {
		case OTG_STATE_A_HOST:
			clear_bit(A_BUS_REQ, &dev->inputs);
			wake_lock(&dev->wlock);
			queue_work(dev->wq, &dev->sm_work);
			break;
		case OTG_STATE_B_PERIPHERAL:
			if (xceiv->gadget->b_hnp_enable) {
				set_bit(A_BUS_SUSPEND, &dev->inputs);
				set_bit(B_BUS_REQ, &dev->inputs);
				wake_lock(&dev->wlock);
				queue_work(dev->wq, &dev->sm_work);
			}
			break;
		case OTG_STATE_A_PERIPHERAL:
			msm_otg_start_timer(dev, TA_BIDL_ADIS,
					A_BIDL_ADIS);
			break;
		default:
			break;
		}
	} else {
		unsigned long timeout;

		switch (state) {
		case OTG_STATE_A_PERIPHERAL:
			/* A-peripheral observed activity on the bus;
			 * clear the A_BIDL_ADIS timer.
			 */
			msm_otg_del_timer(dev);
			break;
		case OTG_STATE_A_SUSPEND:
			/* Remote wakeup or resume */
			set_bit(A_BUS_REQ, &dev->inputs);
			spin_lock_irqsave(&dev->lock, flags);
			dev->otg.state = OTG_STATE_A_HOST;
			spin_unlock_irqrestore(&dev->lock, flags);
			break;
		default:
			break;
		}

		if (suspend == atomic_read(&dev->in_lpm))
			return 0;

		disable_irq(dev->irq);
		if (dev->pmic_notif_supp)
			dev->pdata->pmic_enable_ldo(1);

		msm_otg_resume(dev);

		if (!is_phy_clk_disabled())
			goto out;

		timeout = jiffies + usecs_to_jiffies(100);
		enable_phy_clk();
		while (is_phy_clk_disabled()) {
			if (time_after(jiffies, timeout)) {
				pr_err("%s: Unable to wakeup phy\n", __func__);
				/* Reset both phy and link */
				otg_reset(dev, 1);
				break;
			}
			udelay(10);
		}
out:
		enable_irq(dev->irq);
	}

	return 0;
}
Example #13
static irqreturn_t msm_otg_irq(int irq, void *data)
{
	struct msm_otg *dev = data;
	u32 otgsc, sts, pc, sts_mask;
	irqreturn_t ret = IRQ_HANDLED;
	int work = 0;
	enum usb_otg_state state;

	if (atomic_read(&dev->in_lpm)) {
		msm_otg_resume(dev);
		goto out;
	}

	otgsc = readl(USB_OTGSC);
	sts = readl(USB_USBSTS);

	sts_mask = (otgsc & OTGSC_INTR_MASK) >> 8;

	if (!((otgsc & sts_mask) || (sts & STS_PCI))) {
		ret = IRQ_NONE;
		goto out;
	}

	spin_lock(&dev->lock);
	state = dev->otg.state;
	spin_unlock(&dev->lock);

	pr_debug("IRQ state: %s\n", state_string(state));
	pr_debug("otgsc = %x\n", otgsc);

	if (otgsc & OTGSC_IDIS) {
		if (otgsc & OTGSC_ID) {
			pr_debug("Id set\n");
			set_bit(ID, &dev->inputs);
		} else {
			pr_debug("Id clear\n");
			/* Assert a_bus_req to supply power on
			 * VBUS when a Micro/Mini-A cable is connected
			 * without user intervention.
			 */
			set_bit(A_BUS_REQ, &dev->inputs);
			clear_bit(ID, &dev->inputs);
		}
		writel(otgsc, USB_OTGSC);
		work = 1;
	} else if (otgsc & OTGSC_BSVIS) {
		writel(otgsc, USB_OTGSC);
		/* BSV interrupt comes when operating as an A-device
		 * (VBUS on/off).
		 */
		if (state >= OTG_STATE_A_IDLE)
			goto out;
		if (otgsc & OTGSC_BSV) {
			pr_debug("BSV set\n");
			set_bit(B_SESS_VLD, &dev->inputs);
		} else {
			pr_debug("BSV clear\n");
			clear_bit(B_SESS_VLD, &dev->inputs);
		}
		work = 1;
	} else if (otgsc & OTGSC_DPIS) {
		pr_debug("DPIS detected\n");
		writel(otgsc, USB_OTGSC);
		set_bit(A_SRP_DET, &dev->inputs);
		set_bit(A_BUS_REQ, &dev->inputs);
		work = 1;
	} else if (sts & STS_PCI) {
		pc = readl(USB_PORTSC);
		pr_debug("portsc = %x\n", pc);
		ret = IRQ_NONE;
		/* HCD Acks PCI interrupt. We use this to switch
		 * between different OTG states.
		 */
		work = 1;
		switch (state) {
		case OTG_STATE_A_SUSPEND:
			if (dev->otg.host->b_hnp_enable && (pc & PORTSC_CSC) &&
					!(pc & PORTSC_CCS)) {
				pr_debug("B_CONN clear\n");
				clear_bit(B_CONN, &dev->inputs);
			}
			break;
		case OTG_STATE_B_WAIT_ACON:
			if ((pc & PORTSC_CSC) && (pc & PORTSC_CCS)) {
				pr_debug("A_CONN set\n");
				set_bit(A_CONN, &dev->inputs);
				/* Clear ASE0_BRST timer */
				msm_otg_del_timer(dev);
			}
			break;
		case OTG_STATE_B_HOST:
			if ((pc & PORTSC_CSC) && !(pc & PORTSC_CCS)) {
				pr_debug("A_CONN clear\n");
				clear_bit(A_CONN, &dev->inputs);
			}
			break;
		default:
			work = 0;
			break;
		}
	}
	if (work) {
		wake_lock(&dev->wlock);
		queue_work(dev->wq, &dev->sm_work);
	}
out:
	return ret;
}
Example #14
    // Handle OCC poll response
    void Occ::pollRspHandler(const uint8_t * i_pollResponse,
                             const uint16_t i_pollResponseSize)
    {
        static uint32_t L_elog_retry_count = 0;
        TMGT_DBG("OCC Poll Response", i_pollResponse, i_pollResponseSize);

        const occPollRspStruct_t *pollRsp =
            (occPollRspStruct_t *) i_pollResponse;
        const occPollRspStruct_t *lastPollRsp =
            (occPollRspStruct_t *) iv_lastPollResponse;

        // Trace if any data changed
        if ((false == iv_lastPollValid) ||
            (memcmp(pollRsp,
                    lastPollRsp,
                    OCC_POLL_DATA_MIN_SIZE) != 0))
        {
            TMGT_INF("OCC%d Poll change: Status:%04X Occs:%02X Cfg:%02X "
                     "State:%02X Error:%06X/%08X",
                     iv_instance,
                     (pollRsp->status << 8) | pollRsp->extStatus,
                     pollRsp->occsPresent,
                     pollRsp->requestedCfg, pollRsp->state,
                     (pollRsp->errorId<<16) | pollRsp->errorLength,
                     pollRsp->errorAddress);
        }

        do
        {
            if (false == iv_commEstablished)
            {
                // 1st poll response, so comm has been established for this OCC
                iv_commEstablished = true;
                TMGT_INF("pollRspHandler: FW Level for OCC%d: %.16s",
                         iv_instance, pollRsp->codeLevel);
            }

            // Check for Error Logs
            if (pollRsp->errorId != 0)
            {
                if ((pollRsp->errorId != lastPollRsp->errorId) ||
                    (L_elog_retry_count < 3))
                {
                    if (pollRsp->errorId == lastPollRsp->errorId)
                    {
                        // Only retry same errorId a few times...
                        L_elog_retry_count++;
                        TMGT_ERR("pollRspHandler: Requesting elog 0x%02X"
                                 " (retry %d)",
                                 pollRsp->errorId, L_elog_retry_count);
                    }
                    else
                    {
                        L_elog_retry_count = 0;
                    }

                    // Handle a new error log from the OCC
                    occProcessElog(this,
                                   pollRsp->errorId,
                                   pollRsp->errorAddress,
                                   pollRsp->errorLength);
                    if (iv_needsReset)
                    {
                        // Update state if changed...
                        // (since dropping out of poll rsp handler)
                        if (iv_state != pollRsp->state)
                        {
                            iv_state = (occStateId)pollRsp->state;
                            TMGT_INF("pollRspHandler: updating OCC%d state"
                                     " to %s",
                                     iv_instance, state_string(iv_state));
                        }
                        break;
                    }
                }
            }

            if ((OCC_STATE_ACTIVE == pollRsp->state) ||
                (OCC_STATE_OBSERVATION == pollRsp->state))
            {
                // Check role status
                if (((OCC_ROLE_SLAVE == iv_role) &&
                     ((pollRsp->status & OCC_STATUS_MASTER) != 0)) ||
                    ((OCC_ROLE_MASTER == iv_role) &&
                     ((pollRsp->status & OCC_STATUS_MASTER) == 0)))
                {
                    TMGT_ERR("pollRspHandler: OCC%d Status role mismatch"
                             " (role:0x%02X, status:0x%02X 0x%02X)",
                             iv_instance, iv_role, pollRsp->status,
                             pollRsp->extStatus);
                    iv_needsReset = true;
                    // TODO RTC 109224
                    //iv_resetReason = OCC_RESET_REASON_ERROR;
                    break;
                }
            }

            //iv_requestedFormat = (occCfgDataFormat)pollRsp->requestedCfg;
            if (pollRsp->requestedCfg != 0x00)
            {
                TMGT_INF("pollRspHandler: OCC%d is requesting cfg format"
                         " 0x%02X", iv_instance,
                         pollRsp->requestedCfg);
            }

            // Check for state change
            if (iv_state != pollRsp->state)
            {
                iv_state = (occStateId)pollRsp->state;
                TMGT_INF("pollRspHandler: updating OCC%d state to %s",
                         iv_instance, state_string(iv_state));
            }

            // Copy rspData to lastPollResponse
            memcpy(iv_lastPollResponse, pollRsp, OCC_POLL_DATA_MIN_SIZE);
            iv_lastPollValid = true;
        }
        while(0);

        // NOTE: When breaking out of the above while loop, the new poll
        //       response is NOT copied to lastPollResponse (should only
        //       break when reset required)

        if (true == iv_needsReset)
        {
            // Save full poll response
            memcpy(iv_lastPollResponse, pollRsp, OCC_POLL_DATA_MIN_SIZE);
            iv_lastPollValid = true;
            iv_state = (occStateId)pollRsp->state;
        }

    } // end Occ::pollRspHandler()
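
For the OCC poll handler, state_string() labels an occStateId. A sketch covering just the states that appear in this snippet; any further states, and the exact strings, are assumptions:

    // Hypothetical sketch: labels the occStateId values referenced above.
    const char *state_string(const occStateId i_state)
    {
        switch (i_state)
        {
            case OCC_STATE_ACTIVE:      return "ACTIVE";
            case OCC_STATE_OBSERVATION: return "OBSERVATION";
            default:                    return "UNKNOWN";
        }
    }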
Example #15
FuncAnalysis do_analyze_collect(const Index& index,
                                Context const ctx,
                                CollectedInfo& collect,
                                ClassAnalysis* clsAnalysis,
                                const std::vector<Type>* knownArgs) {
  assertx(ctx.cls == adjust_closure_context(ctx).cls);
  FuncAnalysis ai{ctx};

  auto const bump = trace_bump_for(ctx.cls, ctx.func);
  Trace::Bump bumper1{Trace::hhbbc, bump};
  Trace::Bump bumper2{Trace::hhbbc_cfg, bump};

  if (knownArgs) {
    FTRACE(2, "{:.^70}\n", "Inline Interp");
  }
  SCOPE_EXIT {
    if (knownArgs) {
      FTRACE(2, "{:.^70}\n", "End Inline Interp");
    }
  };

  FTRACE(2, "{:-^70}\n-- {}\n", "Analyze", show(ctx));

  /*
   * Set of RPO ids that still need to be visited.
   *
   * Initially, we need each entry block in this list.  As we visit
   * blocks, we propagate states to their successors and across their
   * back edges---when state merges cause a change to the block
   * stateIn, we will add it to this queue so it gets visited again.
   */
  auto incompleteQ = prepare_incompleteQ(index, ai, clsAnalysis, knownArgs);

  /*
   * There are potentially infinitely growing types when we're using union_of to
   * merge states, so occasionally we need to apply a widening operator.
   *
   * Currently this is done with a straightforward heuristic: if you visit
   * a block too many times, we'll start doing all the merges with the widening
   * operator. We must then continue iterating in case the actual fixed point is
   * higher than the result of widening. Likewise if we loop too much because of
   * local static types changing, we'll widen those.
   *
   * Termination is guaranteed because the widening operator has only finite
   * chains in the type lattice.
   */
  auto totalVisits = std::vector<uint32_t>(ctx.func->blocks.size());
  auto totalLoops = uint32_t{0};

  // For debugging, count how many times basic blocks get interpreted.
  auto interp_counter = uint32_t{0};

  // Used to force blocks that depended on the types of local statics
  // to be re-analyzed when the local statics change.
  std::unordered_map<borrowed_ptr<const php::Block>, std::map<LocalId, Type>>
    usedLocalStatics;

  /*
   * Iterate until a fixed point.
   *
   * Each time a stateIn for a block changes, we re-insert the block's
   * rpo ID in incompleteQ.  Since incompleteQ is ordered, we'll
   * always visit blocks with earlier RPO ids first, which hopefully
   * means fewer iterations.
   */
  do {
    while (!incompleteQ.empty()) {
      auto const blk = ai.rpoBlocks[incompleteQ.pop()];

      totalVisits[blk->id]++;

      FTRACE(2, "block #{}\nin {}{}", blk->id,
             state_string(*ctx.func, ai.bdata[blk->id].stateIn, collect),
             property_state_string(collect.props));
      ++interp_counter;

      auto propagate = [&] (BlockId target, const State* st) {
        if (!st) {
          FTRACE(2, "     Force reprocess: {}\n", target);
          incompleteQ.push(rpoId(ai, target));
          return;
        }

        auto const needsWiden =
          totalVisits[target] >= options.analyzeFuncWideningLimit;

        FTRACE(2, "     {}-> {}\n", needsWiden ? "widening " : "", target);
        FTRACE(4, "target old {}",
               state_string(*ctx.func, ai.bdata[target].stateIn, collect));

        auto const changed =
          needsWiden ? widen_into(ai.bdata[target].stateIn, *st)
                     : merge_into(ai.bdata[target].stateIn, *st);
        if (changed) {
          incompleteQ.push(rpoId(ai, target));
        }
        FTRACE(4, "target new {}",
               state_string(*ctx.func, ai.bdata[target].stateIn, collect));
      };

      auto stateOut = ai.bdata[blk->id].stateIn;
      auto interp   = Interp { index, ctx, collect, blk, stateOut };
      auto flags    = run(interp, propagate);
      if (any(collect.opts & CollectionOpts::EffectFreeOnly) &&
          !collect.effectFree) {
        break;
      }
      // We only care about the usedLocalStatics from the last visit
      if (flags.usedLocalStatics) {
        usedLocalStatics[blk] = std::move(*flags.usedLocalStatics);
      } else {
        usedLocalStatics.erase(blk);
      }

      if (flags.returned) {
        ai.inferredReturn |= std::move(*flags.returned);
      }
    }

    if (any(collect.opts & CollectionOpts::EffectFreeOnly) &&
        !collect.effectFree) {
      break;
    }

    // Maybe some local statics changed type since the last time their
    // blocks were visited.

    if (totalLoops++ >= options.analyzeFuncWideningLimit) {
      // If we loop too many times because of static locals, widen them to
      // ensure termination.
      for (auto& t : collect.localStaticTypes) {
        t = widen_type(std::move(t));
      }
    }

    for (auto const& elm : usedLocalStatics) {
      for (auto const& ls : elm.second) {
        if (collect.localStaticTypes[ls.first] != ls.second) {
          incompleteQ.push(rpoId(ai, elm.first->id));
          break;
        }
      }
    }
  } while (!incompleteQ.empty());

  ai.closureUseTypes = std::move(collect.closureUseTypes);
  ai.cnsMap = std::move(collect.cnsMap);
  ai.readsUntrackedConstants = collect.readsUntrackedConstants;
  ai.mayUseVV = collect.mayUseVV;
  ai.effectFree = collect.effectFree;
  ai.unfoldableFuncs = collect.unfoldableFuncs;

  index.fixup_return_type(ctx.func, ai.inferredReturn);

  /*
   * If inferredReturn is TBottom, the callee didn't execute a return
   * at all.  (E.g. it unconditionally throws, or is an abstract
   * function body.)
   *
   * In this case, we leave the return type as TBottom, to indicate
   * the same to callers.
   */
  assert(ai.inferredReturn.subtypeOf(TGen));

  // For debugging, print the final input states for each block.
  FTRACE(2, "{}", [&] {
    auto const bsep = std::string(60, '=') + "\n";
    auto const sep = std::string(60, '-') + "\n";
    auto ret = folly::format(
      "{}function {} ({} block interps):\n{}",
      bsep,
      show(ctx),
      interp_counter,
      bsep
    ).str();
    for (auto& bd : ai.bdata) {
      folly::format(
        &ret,
        "{}block {}:\nin {}",
        sep,
        ai.rpoBlocks[bd.rpoId]->id,
        state_string(*ctx.func, bd.stateIn, collect)
      );
    }
    ret += sep + bsep;
    folly::format(&ret, "Inferred return type: {}\n", show(ai.inferredReturn));
    ret += bsep;
    return ret;
  }());

  // Do this after the tracing above
  ai.localStaticTypes = std::move(collect.localStaticTypes);
  return ai;
}
Example #16
static void msm_otg_sm_work(struct work_struct *w)
{
	struct msm_otg *dev = container_of(w, struct msm_otg, sm_work);
	int ret;
	int work = 0;
	enum usb_otg_state state;

	if (atomic_read(&dev->in_lpm))
		msm_otg_set_suspend(&dev->otg, 0);

	spin_lock_irq(&dev->lock);
	state = dev->otg.state;
	spin_unlock_irq(&dev->lock);

	pr_debug("state: %s\n", state_string(state));

	switch (state) {
	case OTG_STATE_UNDEFINED:
		if (!dev->otg.host || !is_host())
			set_bit(ID, &dev->inputs);

		if (dev->otg.gadget && is_b_sess_vld())
			set_bit(B_SESS_VLD, &dev->inputs);

		spin_lock_irq(&dev->lock);
		if (test_bit(ID, &dev->inputs)) {
			dev->otg.state = OTG_STATE_B_IDLE;
		} else {
			set_bit(A_BUS_REQ, &dev->inputs);
			dev->otg.state = OTG_STATE_A_IDLE;
		}
		spin_unlock_irq(&dev->lock);

		work = 1;
		break;
	case OTG_STATE_B_IDLE:
		dev->otg.default_a = 0;
		if (!test_bit(ID, &dev->inputs)) {
			pr_debug("!id\n");
			clear_bit(B_BUS_REQ, &dev->inputs);
			otg_reset(dev, 0);

			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_IDLE;
			spin_unlock_irq(&dev->lock);
			work = 1;
		} else if (test_bit(B_SESS_VLD, &dev->inputs)) {
			pr_debug("b_sess_vld\n");
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_PERIPHERAL;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_peripheral(&dev->otg, 1);
		} else if (test_bit(B_BUS_REQ, &dev->inputs)) {
			pr_debug("b_sess_end && b_bus_req\n");
			ret = msm_otg_start_srp(&dev->otg);
			if (ret < 0) {
				/* notify user space */
				clear_bit(B_BUS_REQ, &dev->inputs);
				work = 1;
				break;
			}
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_SRP_INIT;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_timer(dev, TB_SRP_FAIL, B_SRP_FAIL);
			break;
		} else {
			pr_debug("entering into lpm\n");
			msm_otg_suspend(dev);
		}
		break;
	case OTG_STATE_B_SRP_INIT:
		if (!test_bit(ID, &dev->inputs) ||
				test_bit(B_SESS_VLD, &dev->inputs)) {
			pr_debug("!id || b_sess_vld\n");
			msm_otg_del_timer(dev);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_IDLE;
			spin_unlock_irq(&dev->lock);
			work = 1;
		} else if (test_bit(B_SRP_FAIL, &dev->tmouts)) {
			pr_debug("b_srp_fail\n");
			/* notify user space */
			clear_bit(B_BUS_REQ, &dev->inputs);
			clear_bit(B_SRP_FAIL, &dev->tmouts);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_IDLE;
			spin_unlock_irq(&dev->lock);
			dev->b_last_se0_sess = jiffies;
			work = 1;
		}
		break;
	case OTG_STATE_B_PERIPHERAL:
		if (!test_bit(ID, &dev->inputs) ||
				!test_bit(B_SESS_VLD, &dev->inputs)) {
			pr_debug("!id || !b_sess_vld\n");
			clear_bit(B_BUS_REQ, &dev->inputs);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_IDLE;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_peripheral(&dev->otg, 0);
			dev->b_last_se0_sess = jiffies;

			/* Workaround: Reset phy after session */
			otg_reset(dev, 1);

			/* come back later to put hardware in
			 * lpm. This removes addition checks in
			 * suspend routine for missing BSV
			 */
			work = 1;
		} else if (test_bit(B_BUS_REQ, &dev->inputs) &&
				dev->otg.gadget->b_hnp_enable &&
				test_bit(A_BUS_SUSPEND, &dev->inputs)) {
			pr_debug("b_bus_req && b_hnp_en && a_bus_suspend\n");
			msm_otg_start_timer(dev, TB_ASE0_BRST, B_ASE0_BRST);
			msm_otg_start_peripheral(&dev->otg, 0);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_WAIT_ACON;
			spin_unlock_irq(&dev->lock);
			/* start HCD even before A-device enable
			 * pull-up to meet HNP timings.
			 */
			dev->otg.host->is_b_host = 1;
			msm_otg_start_host(&dev->otg, REQUEST_START);
		}
		break;
	case OTG_STATE_B_WAIT_ACON:
		if (!test_bit(ID, &dev->inputs) ||
				!test_bit(B_SESS_VLD, &dev->inputs)) {
			pr_debug("!id || !b_sess_vld\n");
			msm_otg_del_timer(dev);
			/* A-device is physically disconnected during
			 * HNP. Remove HCD.
			 */
			msm_otg_start_host(&dev->otg, REQUEST_STOP);
			dev->otg.host->is_b_host = 0;

			clear_bit(B_BUS_REQ, &dev->inputs);
			clear_bit(A_BUS_SUSPEND, &dev->inputs);
			dev->b_last_se0_sess = jiffies;
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_IDLE;
			spin_unlock_irq(&dev->lock);

			/* Workaround: Reset phy after session */
			otg_reset(dev, 1);
			work = 1;
		} else if (test_bit(A_CONN, &dev->inputs)) {
			pr_debug("a_conn\n");
			clear_bit(A_BUS_SUSPEND, &dev->inputs);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_HOST;
			spin_unlock_irq(&dev->lock);
		} else if (test_bit(B_ASE0_BRST, &dev->tmouts)) {
			/* TODO: A-device may send reset after
			 * enabling HNP; a_bus_resume case is
			 * not handled for now.
			 */
			pr_debug("b_ase0_brst_tmout\n");
			msm_otg_start_host(&dev->otg, REQUEST_STOP);
			dev->otg.host->is_b_host = 0;
			clear_bit(B_ASE0_BRST, &dev->tmouts);
			clear_bit(A_BUS_SUSPEND, &dev->inputs);
			clear_bit(B_BUS_REQ, &dev->inputs);

			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_PERIPHERAL;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_host(&dev->otg, REQUEST_STOP);
		}
		break;
	case OTG_STATE_B_HOST:
		/* B_BUS_REQ is not exposed to user space. So
		 * it must be A_CONN for now.
		 */
		if (!test_bit(B_BUS_REQ, &dev->inputs) ||
				!test_bit(A_CONN, &dev->inputs)) {
			pr_debug("!b_bus_req || !a_conn\n");
			clear_bit(A_CONN, &dev->inputs);
			clear_bit(B_BUS_REQ, &dev->inputs);

			msm_otg_start_host(&dev->otg, 0);
			dev->otg.host->is_b_host = 0;

			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_IDLE;
			spin_unlock_irq(&dev->lock);
			/* Workaround: Reset phy after session */
			otg_reset(dev, 1);
			work = 1;
		}
		break;
	case OTG_STATE_A_IDLE:
		dev->otg.default_a = 1;
		if (test_bit(ID, &dev->inputs)) {
			pr_debug("id\n");
			dev->otg.default_a = 0;
			otg_reset(dev, 0);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_B_IDLE;
			spin_unlock_irq(&dev->lock);
			work = 1;
		} else if (!test_bit(A_BUS_DROP, &dev->inputs) &&
				(test_bit(A_SRP_DET, &dev->inputs) ||
				 test_bit(A_BUS_REQ, &dev->inputs))) {
			pr_debug("!a_bus_drop && (a_srp_det || a_bus_req)\n");

			clear_bit(A_SRP_DET, &dev->inputs);
			/* Disable SRP detection */
			writel((readl(USB_OTGSC) & ~OTGSC_INTR_STS_MASK) &
					~OTGSC_DPIE, USB_OTGSC);

			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_VRISE;
			spin_unlock_irq(&dev->lock);
			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 1);
			msm_otg_start_timer(dev, TA_WAIT_VRISE, A_WAIT_VRISE);
			/* no need to schedule work now */
		} else {
			pr_debug("No session requested\n");

			/* A-device is not providing power on VBUS.
			 * Enable SRP detection.
			 */
			writel((readl(USB_OTGSC) & ~OTGSC_INTR_STS_MASK) |
					OTGSC_DPIE, USB_OTGSC);
			msm_otg_suspend(dev);
		}
		break;
	case OTG_STATE_A_WAIT_VRISE:
		if (test_bit(ID, &dev->inputs) ||
				test_bit(A_BUS_DROP, &dev->inputs) ||
				test_bit(A_WAIT_VRISE, &dev->tmouts)) {
			pr_debug("id || a_bus_drop || a_wait_vrise_tmout\n");
			clear_bit(A_BUS_REQ, &dev->inputs);
			msm_otg_del_timer(dev);
			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_VFALL;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_timer(dev, TA_WAIT_VFALL, A_WAIT_VFALL);
		} else if (test_bit(A_VBUS_VLD, &dev->inputs)) {
			pr_debug("a_vbus_vld\n");
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_BCON;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_timer(dev, TA_WAIT_BCON, A_WAIT_BCON);
			/* Start HCD to detect peripherals. */
			msm_otg_start_host(&dev->otg, REQUEST_START);
		}
		break;
	case OTG_STATE_A_WAIT_BCON:
		if (test_bit(ID, &dev->inputs) ||
				test_bit(A_BUS_DROP, &dev->inputs) ||
				test_bit(A_WAIT_BCON, &dev->tmouts)) {
			pr_debug("id || a_bus_drop || a_wait_bcon_tmout\n");
			msm_otg_del_timer(dev);
			clear_bit(A_BUS_REQ, &dev->inputs);
			msm_otg_start_host(&dev->otg, REQUEST_STOP);
			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_VFALL;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_timer(dev, TA_WAIT_VFALL, A_WAIT_VFALL);
		} else if (test_bit(B_CONN, &dev->inputs)) {
			pr_debug("b_conn\n");
			msm_otg_del_timer(dev);
			/* HCD is added already. just move to
			 * A_HOST state.
			 */
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_HOST;
			spin_unlock_irq(&dev->lock);
		} else if (!test_bit(A_VBUS_VLD, &dev->inputs)) {
			pr_debug("!a_vbus_vld\n");
			msm_otg_del_timer(dev);
			msm_otg_start_host(&dev->otg, REQUEST_STOP);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_VBUS_ERR;
			spin_unlock_irq(&dev->lock);
		}
		break;
	case OTG_STATE_A_HOST:
		if (test_bit(ID, &dev->inputs) ||
				test_bit(A_BUS_DROP, &dev->inputs)) {
			pr_debug("id || a_bus_drop\n");
			clear_bit(B_CONN, &dev->inputs);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_VFALL;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_host(&dev->otg, REQUEST_STOP);
			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
			msm_otg_start_timer(dev, TA_WAIT_VFALL, A_WAIT_VFALL);
		} else if (!test_bit(A_VBUS_VLD, &dev->inputs)) {
			pr_debug("!a_vbus_vld\n");
			clear_bit(B_CONN, &dev->inputs);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_VBUS_ERR;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_host(&dev->otg, REQUEST_STOP);
			/* no work */
		} else if (!test_bit(A_BUS_REQ, &dev->inputs)) {
			/* a_bus_req is de-asserted when root hub is
			 * suspended or HNP is in progress.
			 */
			pr_debug("!a_bus_req\n");
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_SUSPEND;
			spin_unlock_irq(&dev->lock);
			if (dev->otg.host->b_hnp_enable) {
				msm_otg_start_timer(dev, TA_AIDL_BDIS,
						A_AIDL_BDIS);
			} else {
				/* No HNP. Root hub suspended */
				msm_otg_suspend(dev);
			}
		} else if (!test_bit(B_CONN, &dev->inputs)) {
			pr_debug("!b_conn\n");
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_BCON;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_timer(dev, TA_WAIT_BCON, A_WAIT_BCON);
		}
		break;
	case OTG_STATE_A_SUSPEND:
		if (test_bit(ID, &dev->inputs) ||
				test_bit(A_BUS_DROP, &dev->inputs) ||
				test_bit(A_AIDL_BDIS, &dev->tmouts)) {
			pr_debug("id || a_bus_drop || a_aidl_bdis_tmout\n");
			msm_otg_del_timer(dev);
			clear_bit(B_CONN, &dev->inputs);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_VFALL;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_host(&dev->otg, REQUEST_STOP);
			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
			msm_otg_start_timer(dev, TA_WAIT_VFALL, A_WAIT_VFALL);
		} else if (!test_bit(A_VBUS_VLD, &dev->inputs)) {
			pr_debug("!a_vbus_vld\n");
			msm_otg_del_timer(dev);
			clear_bit(B_CONN, &dev->inputs);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_VBUS_ERR;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_host(&dev->otg, REQUEST_STOP);
		} else if (!test_bit(B_CONN, &dev->inputs) &&
				dev->otg.host->b_hnp_enable) {
			pr_debug("!b_conn && b_hnp_enable");
			/* Clear AIDL_BDIS timer */
			msm_otg_del_timer(dev);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_PERIPHERAL;
			spin_unlock_irq(&dev->lock);

			msm_otg_start_host(&dev->otg, REQUEST_HNP_SUSPEND);

			/* We may come here even when B-dev is physically
			 * disconnected during HNP. We go back to host
			 * role if bus is idle for BIDL_ADIS time.
			 */
			dev->otg.gadget->is_a_peripheral = 1;
			msm_otg_start_peripheral(&dev->otg, 1);
		} else if (!test_bit(B_CONN, &dev->inputs) &&
				!dev->otg.host->b_hnp_enable) {
			pr_debug("!b_conn && !b_hnp_enable");
			/* bus request is dropped during suspend.
			 * acquire again for next device.
			 */
			set_bit(A_BUS_REQ, &dev->inputs);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_BCON;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_timer(dev, TA_WAIT_BCON, A_WAIT_BCON);
		}
		break;
	case OTG_STATE_A_PERIPHERAL:
		if (test_bit(ID, &dev->inputs) ||
				test_bit(A_BUS_DROP, &dev->inputs)) {
			pr_debug("id || a_bus_drop\n");
			/* Clear BIDL_ADIS timer */
			msm_otg_del_timer(dev);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_VFALL;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_peripheral(&dev->otg, 0);
			dev->otg.gadget->is_a_peripheral = 0;
			/* HCD was suspended before. Stop it now */
			msm_otg_start_host(&dev->otg, REQUEST_STOP);

			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
			msm_otg_start_timer(dev, TA_WAIT_VFALL, A_WAIT_VFALL);
		} else if (!test_bit(A_VBUS_VLD, &dev->inputs)) {
			pr_debug("!a_vbus_vld\n");
			/* Clear BIDL_ADIS timer */
			msm_otg_del_timer(dev);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_VBUS_ERR;
			spin_unlock_irq(&dev->lock);
			msm_otg_start_peripheral(&dev->otg, 0);
			dev->otg.gadget->is_a_peripheral = 0;
			/* HCD was suspended before. Stop it now */
			msm_otg_start_host(&dev->otg, REQUEST_STOP);
		} else if (test_bit(A_BIDL_ADIS, &dev->tmouts)) {
			pr_debug("a_bidl_adis_tmout\n");
			msm_otg_start_peripheral(&dev->otg, 0);
			dev->otg.gadget->is_a_peripheral = 0;

			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_BCON;
			spin_unlock_irq(&dev->lock);
			set_bit(A_BUS_REQ, &dev->inputs);
			msm_otg_start_host(&dev->otg, REQUEST_HNP_RESUME);
			msm_otg_start_timer(dev, TA_WAIT_BCON, A_WAIT_BCON);
		}
		break;
	case OTG_STATE_A_WAIT_VFALL:
		if (test_bit(A_WAIT_VFALL, &dev->tmouts)) {
			clear_bit(A_VBUS_VLD, &dev->inputs);
			/* Reset both phy and link */
			otg_reset(dev, 1);
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_IDLE;
			spin_unlock_irq(&dev->lock);
			work = 1;
		}
		break;
	case OTG_STATE_A_VBUS_ERR:
		if (test_bit(ID, &dev->inputs) ||
				test_bit(A_BUS_DROP, &dev->inputs) ||
				test_bit(A_CLR_ERR, &dev->inputs)) {
			spin_lock_irq(&dev->lock);
			dev->otg.state = OTG_STATE_A_WAIT_VFALL;
			spin_unlock_irq(&dev->lock);
			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
			msm_otg_start_timer(dev, TA_WAIT_VFALL, A_WAIT_VFALL);
		}
		break;
	default:
		pr_err("invalid OTG state\n");
	}

	if (work)
		queue_work(dev->wq, &dev->sm_work);

	/* IRQ/sysfs may queue work. Check work_pending(); otherwise
	 * we might end up releasing the wakelock after it is acquired
	 * in IRQ/sysfs.
	 */
	if (!work_pending(&dev->sm_work) && !hrtimer_active(&dev->timer))
		wake_unlock(&dev->wlock);
}
Example #17
/**
 * The function passed to created threads.
 *
 * NOTE: is_empty(queue) is called a lot: we must make sure it is
 * only called while we hold the queue lock!
 *
 * 1. Lock the task queue. We're using a condition lock, so we'll
 *    give up the lock until there is a task to run OR the tpDestroy
 *    function sent a broadcast to all threads that they should clean
 *    up.
 * 2. Wait for the signal (task inserted, or tp being destroyed).
 * 3. Now that the queue is locked, check the destruction state. This
 *    state should be valid because a. the change from ALIVE to
 *    something else is a one-way change, b. even if the following
 *    happened:
 *    - Task added
 *    - Thread got out of the WHILE loop
 *    - CONTEXT SWITCH
 *    - Main thread (pool creator) called tp_destroy, state changed
 *    - CONTEXT SWITCH
 *    - Back to our thread, got to the switch() statement and found
 *      out we're dying
 *    This is the desired behaviour (Piazza @281) - we do not need to
 *    make sure tasks added before calls to tpDestroy will be executed
 *    if tpDestroy is called in DO_RUN mode, even if all threads were
 *    available when the task was added.
 * 4. If we're ALIVE, that means pool->queue IS NOT EMPTY (otherwise we
 *    would still be in the while loop, because you can't change DO_RUN
 *    or DO_ALL back to ALIVE so there's no danger we left the while()
 *    loop because of state!=ALIVE but got to state==ALIVE in the
 *    switch), so we can just dequeue a task and run it (remember to
 *    unlock before running!).
 * 5. If we're DO_ALL, it's like ALIVE but first check if there's
 *    something to run (unlike the ALIVE state, we don't know for sure).
 *    If there is, run it; otherwise, exit (no more tasks will come).
 * 6. If we're DO_RUN, exit. Don't take another task, leave them to rot.
 * 7. Rinse and repeat
 */
void* thread_func(void* void_tp) {
	
	int pid = TID();
	
	// Some useful variables
	State state;
	Task* t;
	ThreadPool* tp = (ThreadPool*)void_tp;
	
#if HW3_DEBUG
	// Initialize tp->tids
	pthread_t self = pthread_self();
	int thread_i;
	for (thread_i=0; thread_i<tp->N; ++thread_i)
		if (pthread_equal(tp->threads[thread_i],self)) {
			tp->tids[thread_i]=pid;
			break;
		}
#endif
	PRINT("Thread %d started its function\n",pid);
	
	// Main thread task
	while(1) {
		
		// Get the initial state and the task lock, when we need it (task to do or we're dying)
		// IMPORTANT: LOCK THE TASK LOCK BEFORE READING THE STATE!
		// Otherwise, we can see this situation:
		// - T1 reads the state, it's ALIVE
		// - CS-->main thread
		// - Main thread calls tpDestroy
		// - Main thread broadcasts, starts waiting for all threads
		// - CS-->T1
		// - T1 locks the task lock (remember: state==ALIVE)
		// - The task queue is empty and state==ALIVE so T1 will wait for a signal that will never come.
		// Hence, DO NOT do this:
		// 1. state = read_state(tp);
		// 2. pthread_mutex_lock(&tp->task_lock);
		// But do it the other way round:
		pthread_mutex_lock(&tp->task_lock);										// This is OK because during INIT, we don't lock the task queue (after its creation)
		state = read_state(tp);
		PRINT("Thread %d locked the task queue\n",pid);
		while (osIsQueueEmpty(tp->tasks) && state == ALIVE) {					// Wait for a task OR the destruction of the pool
			PRINT("Thread %d started waiting for a signal\n",pid);
			pthread_cond_wait(&tp->queue_not_empty_or_dying,&tp->task_lock);	// Either one gives a signal
			state = read_state(tp);
			PRINT("Thread %d got the signal and locked the lock\n",pid);
		}
		PRINT("Thread %d got out of the while() loop, state==%s\n",pid,state_string(read_state(tp)));
		switch(state) {
			case ALIVE:											// If we're not dying, take a task and do it.
				t = (Task*)osDequeue(tp->tasks);
				pthread_mutex_unlock(&tp->task_lock);
				PRINT("Thread %d doing its task\n",pid);
				t->func(t->param);
				free(t);
				break;
			case DO_ALL:										// If we're dying, but we should clean up the queue:
				if (!osIsQueueEmpty(tp->tasks)) {				// THIS TEST IS NOT USELESS! We may have got here
					t = (Task*)osDequeue(tp->tasks);			// via a broadcast() call from tp_destroy and the
					pthread_mutex_unlock(&tp->task_lock);		// state may be DO_ALL but is_empty() may be true...
					PRINT("Thread %d doing its task\n",pid);	// Thus, the while() loop terminated and we got here.
					t->func(t->param);
					free(t);
				}
				else {											// If we're here, there are no more tasks to dequeue!
					pthread_mutex_unlock(&tp->task_lock);		// As we're being destroyed anyway, exit.
					PRINT("Thread %d unlocked the lock and returning\n",pid);
					return NULL;
				}
				break;
			case DO_RUN:										// If we're dying and no more tasks should be done,
				pthread_mutex_unlock(&tp->task_lock);			// just exit before dequeuing anything...
				PRINT("Thread %d unlocked the lock and returning\n",pid);
				return NULL;
				break;
		}
	}
}
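
Here state_string() pretty-prints the pool's State enum for the debug traces. A minimal sketch assuming only the three states used above (ALIVE, DO_ALL, DO_RUN); the exact labels are assumptions:

// Hypothetical sketch: names the thread pool states traced in thread_func().
static const char* state_string(State state) {
	switch (state) {
		case ALIVE:  return "ALIVE";
		case DO_ALL: return "DO_ALL";
		case DO_RUN: return "DO_RUN";
		default:     return "UNKNOWN";
	}
}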
Example #18
static int __acpi_bus_set_power(struct acpi_device *device, int state)
{
	int result = 0;
	acpi_status status = AE_OK;
	char object_name[5] = { '_', 'P', 'S', '0' + state, '\0' };

	if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
		return -EINVAL;

	/* Make sure this is a valid target state */

	if (state == device->power.state) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at %s\n",
				  state_string(state)));
		return 0;
	}

	if (!device->power.states[state].flags.valid) {
		printk(KERN_WARNING PREFIX "Device does not support %s\n",
		       state_string(state));
		return -ENODEV;
	}
	if (device->parent && (state < device->parent->power.state)) {
		printk(KERN_WARNING PREFIX
			      "Cannot set device to a higher-powered"
			      " state than parent\n");
		return -ENODEV;
	}

	/* For D3cold we should execute _PS3, not _PS4. */
	if (state == ACPI_STATE_D3_COLD)
		object_name[3] = '3';

	/*
	 * Transition Power
	 * ----------------
	 * On transitions to a higher-powered state we first apply power (via
	 * power resources), then evaluate _PSx.  Conversely for transitions to
	 * a lower-powered state.
	 */
	if (state < device->power.state) {
		if (device->power.flags.power_resources) {
			result = acpi_power_transition(device, state);
			if (result)
				goto end;
		}
		if (device->power.states[state].flags.explicit_set) {
			status = acpi_evaluate_object(device->handle,
						      object_name, NULL, NULL);
			if (ACPI_FAILURE(status)) {
				result = -ENODEV;
				goto end;
			}
		}
	} else {
		if (device->power.states[state].flags.explicit_set) {
			status = acpi_evaluate_object(device->handle,
						      object_name, NULL, NULL);
			if (ACPI_FAILURE(status)) {
				result = -ENODEV;
				goto end;
			}
		}
		if (device->power.flags.power_resources) {
			result = acpi_power_transition(device, state);
			if (result)
				goto end;
		}
	}

 end:
	if (result)
		printk(KERN_WARNING PREFIX
			      "Device [%s] failed to transition to %s\n",
			      device->pnp.bus_id, state_string(state));
	else {
		device->power.state = state;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Device [%s] transitioned to %s\n",
				  device->pnp.bus_id, state_string(state)));
	}

	return result;
}