Example #1
// Check if there are any JVM TI prefixes which have been applied to the native method name.
// If any are found, remove them before attempting the lookup of the
// native implementation again.
// See SetNativeMethodPrefix in the JVM TI Spec for more details.
address NativeLookup::lookup_entry_prefixed(methodHandle method, bool& in_base_library, TRAPS) {
  ResourceMark rm(THREAD);

  int prefix_count;
  char** prefixes = JvmtiExport::get_all_native_method_prefixes(&prefix_count);
  char* in_name = method->name()->as_C_string();
  char* wrapper_name = in_name;
  // last applied prefix will be first -- go backwards
  for (int i = prefix_count-1; i >= 0; i--) {
    char* prefix = prefixes[i];
    size_t prefix_len = strlen(prefix);
    if (strncmp(prefix, wrapper_name, prefix_len) == 0) {
      // has this prefix, remove it
      wrapper_name += prefix_len;
    }
  }
  if (wrapper_name != in_name) {
    // we have a name for a wrapping method
    int wrapper_name_len = (int)strlen(wrapper_name);
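    // Probe the symbol table without creating a new symbol; if the stripped
    // name was never interned, there can be no wrapper method to find.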
    TempNewSymbol wrapper_symbol = SymbolTable::probe(wrapper_name, wrapper_name_len);
    if (wrapper_symbol != NULL) {
      KlassHandle kh(method->method_holder());
      methodOop wrapper_method = Klass::cast(kh())->lookup_method(wrapper_symbol,
                                                                  method->signature());
      if (wrapper_method != NULL && !wrapper_method->is_native()) {
        // we found a wrapper method, use its native entry
        method->set_is_prefixed_native();
        return lookup_entry(wrapper_method, in_base_library, THREAD);
      }
    }
  }
  return NULL;
}
Example #2
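// Build a klassVtable accessor for this array klass: handleize the klassOop and
// pair it with the vtable's start address and entry count.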
klassVtable* arrayKlass::vtable() const {
  KlassHandle kh(Thread::current(), as_klassOop());
  return new klassVtable(kh, start_of_vtable(), vtable_length() / vtableEntry::size());
}
Example #3
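	// Register a kernel: wrap it in a KernelHandler and index it by name in the map m.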
	void KernelRegister::add(cl_cpd::Kernel* kernel)
	{
		KernelHandler kh(kernel);
		m[kh->getName()] = kh;
	}
Example #4
void omxComputeNumericDeriv::computeImpl(FitContext *fc)
{
	if (fc->fitUnits == FIT_UNITS_SQUARED_RESIDUAL ||
	    fc->fitUnits == FIT_UNITS_SQUARED_RESIDUAL_CHISQ) {  // refactor TODO
		numParams = 0;
		if (verbose >= 1) mxLog("%s: derivatives %s units are meaningless",
					name, fitUnitsToName(fc->fitUnits));
		return; //Possible TODO: calculate Hessian anyway?
	}

	int newWanted = fc->wanted | FF_COMPUTE_GRADIENT;
	if (wantHessian) newWanted |= FF_COMPUTE_HESSIAN;

	int nf = fc->calcNumFree();
	if (numParams != 0 && numParams != nf) {
		mxThrow("%s: number of parameters changed from %d to %d",
			 name, numParams, nf);
	}

	numParams = nf;
	if (numParams <= 0) { complainNoFreeParam(); return; }

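	// Snapshot the current free-parameter estimates and map free-parameter index
	// to varGroup index, skipping parameters that have been profiled out.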
	optima.resize(numParams);
	fc->copyEstToOptimizer(optima);
	paramMap.resize(numParams);
	for (int px=0,ex=0; px < numParams; ++ex) {
		if (fc->profiledOut[ex]) continue;
		paramMap[px++] = ex;
	}

	omxAlgebraPreeval(fitMat, fc);
	fc->createChildren(fitMat); // allow FIML rowwiseParallel even when parallel=false

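	// Count the nonlinear constraints and size the constraint value and Jacobian
	// storage before computing the final constraint Jacobian.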
	fc->state->countNonlinearConstraints(fc->state->numEqC, fc->state->numIneqC, false);
	int c_n = fc->state->numEqC + fc->state->numIneqC;
	fc->constraintFunVals.resize(c_n);
	fc->constraintJacobian.resize(c_n, numParams);
	if(c_n){
		omxCalcFinalConstraintJacobian(fc, numParams);
	}
	// TODO: Allow more than one hessian value for calculation

	int numChildren = 1;
	if (parallel && !fc->openmpUser && fc->childList.size()) numChildren = fc->childList.size();

	if (!fc->haveReferenceFit(fitMat)) return;

	minimum = fc->fit;

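	// Allocate one numerical-derivative workspace per child context
	// (a single one when running serially).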
	hessWorkVector = new hess_struct[numChildren];
	if (numChildren == 1) {
		omxPopulateHessianWork(hessWorkVector, fc);
	} else {
		for(int i = 0; i < numChildren; i++) {
			omxPopulateHessianWork(hessWorkVector + i, fc->childList[i]);
		}
	}
	if(verbose >= 1) mxLog("Numerical Hessian approximation (%d children, ref fit %.2f)",
			       numChildren, minimum);

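	// If requested, start the Hessian as all-NA and seed any entries the user
	// declared as known via knownHessian/khMap.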
	hessian = NULL;
	if (wantHessian) {
		hessian = fc->getDenseHessUninitialized();
		Eigen::Map< Eigen::MatrixXd > eH(hessian, numParams, numParams);
		eH.setConstant(NA_REAL);

		if (knownHessian) {
			int khSize = int(khMap.size());
			Eigen::Map< Eigen::MatrixXd > kh(knownHessian, khSize, khMap.size());
			for (int rx=0; rx < khSize; ++rx) {
				for (int cx=0; cx < khSize; ++cx) {
					if (khMap[rx] < 0 || khMap[cx] < 0) continue;
					eH(khMap[rx], khMap[cx]) = kh(rx, cx);
				}
			}
		}
	}

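	// Build the 'detail' data frame on first use: a logical 'symmetric' column plus
	// forward/central/backward gradient columns, one row per free parameter.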
	if (detail) {
		recordDetail = false; // already done it once
	} else {
		Rf_protect(detail = Rf_allocVector(VECSXP, 4));
		SET_VECTOR_ELT(detail, 0, Rf_allocVector(LGLSXP, numParams));
		for (int gx=0; gx < 3; ++gx) {
			SET_VECTOR_ELT(detail, 1+gx, Rf_allocVector(REALSXP, numParams));
		}
		SEXP detailCols;
		Rf_protect(detailCols = Rf_allocVector(STRSXP, 4));
		Rf_setAttrib(detail, R_NamesSymbol, detailCols);
		SET_STRING_ELT(detailCols, 0, Rf_mkChar("symmetric"));
		SET_STRING_ELT(detailCols, 1, Rf_mkChar("forward"));
		SET_STRING_ELT(detailCols, 2, Rf_mkChar("central"));
		SET_STRING_ELT(detailCols, 3, Rf_mkChar("backward"));

		SEXP detailRowNames;
		Rf_protect(detailRowNames = Rf_allocVector(STRSXP, numParams));
		Rf_setAttrib(detail, R_RowNamesSymbol, detailRowNames);
		for (int nx=0; nx < int(numParams); ++nx) {
			SET_STRING_ELT(detailRowNames, nx, Rf_mkChar(fc->varGroup->vars[nx]->name));
		}
		markAsDataFrame(detail);
	}

	gforward = REAL(VECTOR_ELT(detail, 1));
	gcentral = REAL(VECTOR_ELT(detail, 2));
	gbackward = REAL(VECTOR_ELT(detail, 3));
	Eigen::Map< Eigen::ArrayXd > Gf(gforward, numParams);
	Eigen::Map< Eigen::ArrayXd > Gc(gcentral, numParams);
	Eigen::Map< Eigen::ArrayXd > Gb(gbackward, numParams);
	Gf.setConstant(NA_REAL);
	Gc.setConstant(NA_REAL);
	Gb.setConstant(NA_REAL);

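	// Probe the Hessian/gradient entries, spreading the work across the child contexts.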
	calcHessianEntry che(this);
	CovEntrywiseParallel(numChildren, che);

	for(int i = 0; i < numChildren; i++) {
		struct hess_struct *hw = hessWorkVector + i;
		totalProbeCount += hw->probeCount;
	}
	delete [] hessWorkVector;
	if (isErrorRaised()) return;

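	// For each free parameter: entries pinned at an active bound are excluded from the
	// gradient-norm check, and the forward/backward estimates are compared to flag
	// asymmetric gradient entries.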
	Eigen::Map< Eigen::ArrayXi > Gsymmetric(LOGICAL(VECTOR_ELT(detail, 0)), numParams);
	double gradNorm = 0.0;
	
	double feasibilityTolerance = Global->feasibilityTolerance;
	for (int px=0; px < numParams; ++px) {
		// factor out similar code in ComputeNR
		omxFreeVar &fv = *fc->varGroup->vars[ paramMap[px] ];
		if ((fabs(optima[px] - fv.lbound) < feasibilityTolerance && Gc[px] > 0) ||
		    (fabs(optima[px] - fv.ubound) < feasibilityTolerance && Gc[px] < 0)) {
			Gsymmetric[px] = false;
			continue;
		}
		gradNorm += Gc[px] * Gc[px];
		double relsym = 2 * fabs(Gf[px] + Gb[px]) / (Gb[px] - Gf[px]);
		Gsymmetric[px] = (Gf[px] < 0 && 0 < Gb[px] && relsym < 1.5);
		if (checkGradient && verbose >= 2 && !Gsymmetric[px]) {
			mxLog("%s: param[%d] %d %f", name, px, Gsymmetric[px], relsym);
		}
	}
	
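	// Store the central-difference gradient back into the fit context.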
	fc->grad.resize(fc->numParam);
	fc->grad.setZero();
	fc->copyGradFromOptimizer(Gc);
	
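	// Re-evaluate the inequality constraints so their activity can be checked below.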
	if(c_n){
		fc->inequality.resize(fc->state->numIneqC);
		fc->analyticIneqJacTmp.resize(fc->state->numIneqC, numParams);
		fc->myineqFun(true, verbose, omxConstraint::LESS_THAN, false);
	}

	gradNorm = sqrt(gradNorm);
	double gradThresh = Global->getGradientThreshold(minimum);
	// The gradient will generally not be near zero at a local minimum if there are
	// equality constraints or active inequality constraints:
	if ( checkGradient && gradNorm > gradThresh && !(fc->state->numEqC || fc->inequality.array().sum()) ) {
		if (verbose >= 1) {
			mxLog("Some gradient entries are too large, norm %f", gradNorm);
		}
		if (fc->getInform() < INFORM_NOT_AT_OPTIMUM) fc->setInform(INFORM_NOT_AT_OPTIMUM);
	}

	fc->setEstFromOptimizer(optima);
	// auxiliary information like per-row likelihoods needs a refresh
	ComputeFit(name, fitMat, FF_COMPUTE_FIT, fc);
	fc->wanted = newWanted;
}