// Generated file (from: local_response_norm_float_1_relaxed.mod.py). Do not edit
void CreateModel(Model *model) {
  OperandType type2(Type::FLOAT32, {});
  OperandType type1(Type::INT32, {});
  OperandType type0(Type::TENSOR_FLOAT32, {1, 1, 1, 6});
  // Phase 1, operands
  auto input = model->addOperand(&type0);
  auto radius = model->addOperand(&type1);
  auto bias = model->addOperand(&type2);
  auto alpha = model->addOperand(&type2);
  auto beta = model->addOperand(&type2);
  auto output = model->addOperand(&type0);
  // Phase 2, operations
  static int32_t radius_init[] = {20};
  model->setOperandValue(radius, radius_init, sizeof(int32_t) * 1);
  static float bias_init[] = {9.0f};
  model->setOperandValue(bias, bias_init, sizeof(float) * 1);
  static float alpha_init[] = {4.0f};
  model->setOperandValue(alpha, alpha_init, sizeof(float) * 1);
  static float beta_init[] = {0.5f};
  model->setOperandValue(beta, beta_init, sizeof(float) * 1);
  model->addOperation(ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION, {input, radius, bias, alpha, beta}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input},
    {output});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}
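The model above wires a {1, 1, 1, 6} tensor through LOCAL_RESPONSE_NORMALIZATION with radius 20, bias 9, alpha 4 and beta 0.5, then requests relaxed FP16 execution. As a reading aid, here is a minimal reference sketch of the across-channel normalization this operation is documented to perform (output[c] = input[c] / (bias + alpha * sum of squares in the window)^beta); it is illustrative only and not part of the generated test harness.
#include <algorithm>
#include <cmath>
#include <vector>

// Illustrative reference for ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION on a
// tensor whose last dimension holds the channels (here 6 of them).
std::vector<float> LocalResponseNormSketch(const std::vector<float>& in,
                                           int radius, float bias, float alpha, float beta) {
  const int channels = static_cast<int>(in.size());
  std::vector<float> out(in.size());
  for (int c = 0; c < channels; ++c) {
    float sqSum = 0.0f;
    for (int d = std::max(0, c - radius); d <= std::min(channels - 1, c + radius); ++d)
      sqSum += in[d] * in[d];
    out[c] = in[c] / std::pow(bias + alpha * sqSum, beta);
  }
  return out;
}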
// Generated file (from: depthwise_conv2d_quant8_2.mod.py). Do not edit
void CreateModel(Model *model) {
  OperandType type3(Type::INT32, {});
  OperandType type2(Type::TENSOR_INT32, {4}, 0.25f, 0);
  OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 1, 4}, 1.f, 127);
  OperandType type1(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.5f, 127);
  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 3, 2, 2}, 0.5f, 127);
  // Phase 1, operands
  auto op1 = model->addOperand(&type0);
  auto op2 = model->addOperand(&type1);
  auto op3 = model->addOperand(&type2);
  auto pad_valid = model->addOperand(&type3);
  auto act_none = model->addOperand(&type3);
  auto stride = model->addOperand(&type3);
  auto channelMultiplier = model->addOperand(&type3);
  auto op4 = model->addOperand(&type4);
  // Phase 2, operations
  static uint8_t op2_init[] = {129, 131, 133, 135, 109, 147, 105, 151, 137, 139, 141, 143, 153, 99, 157, 95};
  model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 16);
  static int32_t op3_init[] = {4, 8, 12, 16};
  model->setOperandValue(op3, op3_init, sizeof(int32_t) * 4);
  static int32_t pad_valid_init[] = {2};
  model->setOperandValue(pad_valid, pad_valid_init, sizeof(int32_t) * 1);
  static int32_t act_none_init[] = {0};
  model->setOperandValue(act_none, act_none_init, sizeof(int32_t) * 1);
  static int32_t stride_init[] = {1};
  model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
  static int32_t channelMultiplier_init[] = {2};
  model->setOperandValue(channelMultiplier, channelMultiplier_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, pad_valid, stride, stride, channelMultiplier, act_none}, {op4});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1},
    {op4});
  assert(model->isValid());
}
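All quantized operands above follow the affine scheme real_value = scale * (quantized_value - zeroPoint), and the bias tensor's scale (0.25) is the product of the input and filter scales (0.5 * 0.5), the usual requirement for quantized convolutions. A small sketch (the helper name is chosen here for illustration):
#include <cstdint>

// real_value = scale * (quantized_value - zeroPoint)
constexpr float Dequantize(std::uint8_t q, float scale, std::int32_t zeroPoint) {
  return scale * (static_cast<std::int32_t>(q) - zeroPoint);
}
static_assert(Dequantize(129, 0.5f, 127) == 1.0f, "first filter value of op2 is 1.0");
static_assert(Dequantize(127, 0.5f, 127) == 0.0f, "the zero point maps to 0.0");
static_assert(0.5f * 0.5f == 0.25f, "bias scale = input scale * filter scale");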
Example #3
bool access_ReadElement(cad_access_module *self, char *buffer, cad_scheme *scheme, cad_route_map *map)
{
	uint32_t number(0), type1(0), type2(0);

	sscanf(buffer, "D%u DIP%u\n", &number, &type1);
	sscanf(buffer, "D%u SOCKET%u\n", &number, &type2);
		
	if (type1 == 0 && type2 == 0)
	{
		self->sys->kernel->PrintDebug( "Invalid file %s.\nPackage type must be DIPx or SOCKETx, where x > 0", 
			self->sys->fileName);
		return false;
	}

	uint32_t type = (type1 != 0) ? (PT_DIP | type1) : (PT_SOCKET | type2);
	scheme->chip_number++;
	scheme->chips = (cad_chip *)realloc(scheme->chips, sizeof(cad_chip) * scheme->chip_number);
	cad_chip *c = &scheme->chips[ scheme->chip_number - 1 ];
	
	c->left_border	= UNDEFINED_VALUE;
	c->top_border	= UNDEFINED_VALUE;
	c->orientation	= UNDEFINED_VALUE;
	c->position		= UNDEFINED_VALUE;
	c->num			= number;
	c->package_type = type;

	return true;
}
// Generated file (from: conv_3_h3_w2_SAME.mod.py). Do not edit
void CreateModel(Model *model) {
  OperandType type0(Type::INT32, {});
  OperandType type1(Type::TENSOR_FLOAT32, {1, 8, 8, 3});
  OperandType type2(Type::TENSOR_FLOAT32, {3, 3, 2, 3});
  OperandType type3(Type::TENSOR_FLOAT32, {3});
  // Phase 1, operands
  auto b4 = model->addOperand(&type0);
  auto b5 = model->addOperand(&type0);
  auto b6 = model->addOperand(&type0);
  auto b7 = model->addOperand(&type0);
  auto op2 = model->addOperand(&type1);
  auto op3 = model->addOperand(&type1);
  auto op0 = model->addOperand(&type2);
  auto op1 = model->addOperand(&type3);
  // Phase 2, operations
  static int32_t b4_init[] = {1};
  model->setOperandValue(b4, b4_init, sizeof(int32_t) * 1);
  static int32_t b5_init[] = {1};
  model->setOperandValue(b5, b5_init, sizeof(int32_t) * 1);
  static int32_t b6_init[] = {1};
  model->setOperandValue(b6, b6_init, sizeof(int32_t) * 1);
  static int32_t b7_init[] = {0};
  model->setOperandValue(b7, b7_init, sizeof(int32_t) * 1);
  static float op0_init[] = {-0.966213f, -0.579455f, -0.684259f, 0.738216f, 0.184325f, 0.0973683f, -0.176863f, -0.23936f, -0.000233404f, 0.055546f, -0.232658f, -0.316404f, -0.012904f, 0.320705f, -0.326657f, -0.919674f, 0.868081f, -0.824608f, -0.467474f, 0.0278809f, 0.563238f, 0.386045f, -0.270568f, -0.941308f, -0.779227f, -0.261492f, -0.774804f, -0.79665f, 0.22473f, -0.414312f, 0.685897f, -0.327792f, 0.77395f, -0.714578f, -0.972365f, 0.0696099f, -0.82203f, -0.79946f, 0.37289f, -0.917775f, 0.82236f, -0.144706f, -0.167188f, 0.268062f, 0.702641f, -0.412223f, 0.755759f, 0.721547f, -0.43637f, -0.274905f, -0.269165f, 0.16102f, 0.819857f, -0.312008f};
  model->setOperandValue(op0, op0_init, sizeof(float) * 54);
  static float op1_init[] = {0.0f, 0.0f, 0.0f};
  model->setOperandValue(op1, op1_init, sizeof(float) * 3);
  model->addOperation(ANEURALNETWORKS_CONV_2D, {op2, op0, op1, b4, b5, b6, b7}, {op3});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op2},
    {op3});
  assert(model->isValid());
}
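In this model b4 = 1 selects the implicit SAME padding scheme and the strides (b5, b6) are 1, so the output keeps the 8x8 spatial extent of op2: with SAME padding the output extent is ceil(input / stride). A quick check (the helper name is illustrative):
constexpr int SameOutExtent(int in, int stride) { return (in + stride - 1) / stride; }
static_assert(SameOutExtent(8, 1) == 8, "conv_3_h3_w2_SAME keeps the 8x8 spatial extent");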
Example #5
// Generated file (from: svdf_state.mod.py). Do not edit
void CreateModel(Model *model) {
  OperandType type5(Type::INT32, {});
  OperandType type0(Type::TENSOR_FLOAT32, {2, 3});
  OperandType type4(Type::TENSOR_FLOAT32, {2, 40});
  OperandType type6(Type::TENSOR_FLOAT32, {2, 4});
  OperandType type2(Type::TENSOR_FLOAT32, {4, 10});
  OperandType type1(Type::TENSOR_FLOAT32, {4, 3});
  OperandType type3(Type::TENSOR_FLOAT32, {4});
  // Phase 1, operands
  auto input = model->addOperand(&type0);
  auto weights_feature = model->addOperand(&type1);
  auto weights_time = model->addOperand(&type2);
  auto bias = model->addOperand(&type3);
  auto state_in = model->addOperand(&type4);
  auto rank_param = model->addOperand(&type5);
  auto activation_param = model->addOperand(&type5);
  auto state_out = model->addOperand(&type4);
  auto output = model->addOperand(&type6);
  // Phase 2, operations
  static int32_t rank_param_init[] = {1};
  model->setOperandValue(rank_param, rank_param_init, sizeof(int32_t) * 1);
  static int32_t activation_param_init[] = {0};
  model->setOperandValue(activation_param, activation_param_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_SVDF, {input, weights_feature, weights_time, bias, state_in, rank_param, activation_param}, {state_out, output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input, weights_feature, weights_time, bias, state_in},
    {state_out, output});
  assert(model->isValid());
}
// Generated file (from: fully_connected_quant8_2.mod.py). Do not edit
void CreateModel(Model *model) {
  OperandType type4(Type::INT32, {});
  OperandType type2(Type::TENSOR_INT32, {3}, 0.25f, 0);
  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2, 3}, 1.f, 127);
  OperandType type1(Type::TENSOR_QUANT8_ASYMM, {3, 10}, 0.5f, 127);
  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {4, 1, 5, 1}, 0.5f, 127);
  // Phase 1, operands
  auto op1 = model->addOperand(&type0);
  auto op2 = model->addOperand(&type1);
  auto b0 = model->addOperand(&type2);
  auto op3 = model->addOperand(&type3);
  auto act_relu = model->addOperand(&type4);
  // Phase 2, operations
  static uint8_t op2_init[] = {129, 131, 133, 135, 137, 139, 141, 143, 145, 147, 129, 131, 133, 135, 137, 139, 141, 143, 145, 147, 129, 131, 133, 135, 137, 139, 141, 143, 145, 147};
  model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 30);
  static int32_t b0_init[] = {4, 8, 12};
  model->setOperandValue(b0, b0_init, sizeof(int32_t) * 3);
  static int32_t act_relu_init[] = {1};
  model->setOperandValue(act_relu, act_relu_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_FULLY_CONNECTED, {op1, op2, b0, act_relu}, {op3});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1},
    {op3});
  assert(model->isValid());
}
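FULLY_CONNECTED flattens its input to a 2-D [batch_size, input_size] tensor, where input_size matches the second dimension of the weights ({3, 10} here, so input_size = 10) and batch_size is the element count divided by input_size; the {4, 1, 5, 1} input therefore becomes a {2, 10} batch, matching the {2, 3} output, and act_relu = 1 fuses a RELU. A quick check (the helper name is illustrative):
constexpr int BatchSize(int totalElements, int inputSize) { return totalElements / inputSize; }
static_assert(BatchSize(4 * 1 * 5 * 1, 10) == 2, "op1 flattens to {2, 10} for the {3, 10} weights");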
// Generated file (from: depthwise_conv2d_quant8.mod.py). Do not edit
void CreateModel(Model *model) {
  OperandType type2(Type::INT32, {});
  OperandType type1(Type::TENSOR_INT32, {2}, 0.25f, 0);
  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 0);
  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 2}, 1.f, 0);
  // Phase 1, operands
  auto op1 = model->addOperand(&type0);
  auto op2 = model->addOperand(&type0);
  auto op3 = model->addOperand(&type1);
  auto pad0 = model->addOperand(&type2);
  auto act = model->addOperand(&type2);
  auto stride = model->addOperand(&type2);
  auto channelMultiplier = model->addOperand(&type2);
  auto op4 = model->addOperand(&type3);
  // Phase 2, operations
  static uint8_t op2_init[] = {2, 4, 2, 0, 2, 2, 2, 0};
  model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 8);
  static int32_t op3_init[] = {0, 0};
  model->setOperandValue(op3, op3_init, sizeof(int32_t) * 2);
  static int32_t pad0_init[] = {0};
  model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
  static int32_t act_init[] = {0};
  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
  static int32_t stride_init[] = {1};
  model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
  static int32_t channelMultiplier_init[] = {1};
  model->setOperandValue(channelMultiplier, channelMultiplier_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, channelMultiplier, act}, {op4});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1},
    {op4});
  assert(model->isValid());
}
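Note that this depthwise model and depthwise_conv2d_quant8_2 above exercise the two operand layouts that DEPTHWISE_CONV_2D accepts: the earlier one uses the implicit-padding form (a single padding-scheme operand, pad_valid = 2 for VALID), this one the explicit form with four per-edge padding values (all 0 here). Copied from the two addOperation calls above, for side-by-side reference:
// Implicit padding, 8 inputs:
//   {op1, op2, op3, pad_valid, stride, stride, channelMultiplier, act_none}
// Explicit padding, 11 inputs:
//   {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, channelMultiplier, act}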
// Generated file (from: fully_connected_float_large.mod.py). Do not edit
void CreateModel(Model *model) {
  OperandType type3(Type::INT32, {});
  OperandType type2(Type::TENSOR_FLOAT32, {1, 1});
  OperandType type0(Type::TENSOR_FLOAT32, {1, 5});
  OperandType type1(Type::TENSOR_FLOAT32, {1});
  // Phase 1, operands
  auto op1 = model->addOperand(&type0);
  auto op2 = model->addOperand(&type0);
  auto b0 = model->addOperand(&type1);
  auto op3 = model->addOperand(&type2);
  auto act = model->addOperand(&type3);
  // Phase 2, operations
  static float op2_init[] = {2.0f, 3.0f, 4.0f, 5.0f, 6.0f};
  model->setOperandValue(op2, op2_init, sizeof(float) * 5);
  static float b0_init[] = {900000.0f};
  model->setOperandValue(b0, b0_init, sizeof(float) * 1);
  static int32_t act_init[] = {0};
  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_FULLY_CONNECTED, {op1, op2, b0, act}, {op3});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1},
    {op3});
  assert(model->isValid());
}
// Generated file (from: strided_slice_float_11.mod.py). Do not edit
void CreateModel(Model *model) {
  OperandType type2(Type::INT32, {});
  OperandType type0(Type::TENSOR_FLOAT32, {2, 3});
  OperandType type3(Type::TENSOR_FLOAT32, {3});
  OperandType type1(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto input = model->addOperand(&type0);
  auto begins = model->addOperand(&type1);
  auto ends = model->addOperand(&type1);
  auto strides = model->addOperand(&type1);
  auto beginMask = model->addOperand(&type2);
  auto endMask = model->addOperand(&type2);
  auto shrinkAxisMask = model->addOperand(&type2);
  auto output = model->addOperand(&type3);
  // Phase 2, operations
  static int32_t begins_init[] = {0, 0};
  model->setOperandValue(begins, begins_init, sizeof(int32_t) * 2);
  static int32_t ends_init[] = {2, 3};
  model->setOperandValue(ends, ends_init, sizeof(int32_t) * 2);
  static int32_t strides_init[] = {1, 1};
  model->setOperandValue(strides, strides_init, sizeof(int32_t) * 2);
  static int32_t beginMask_init[] = {0};
  model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
  static int32_t endMask_init[] = {0};
  model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
  static int32_t shrinkAxisMask_init[] = {1};
  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input},
    {output});
  assert(model->isValid());
}
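With begins {0, 0}, ends {2, 3} and strides {1, 1} the slice covers the whole {2, 3} input; shrinkAxisMask = 1 (bit 0 set) then removes dimension 0, so only row begins[0] = 0 remains and the declared output shape is {3}. A quick check of the pre-shrink extents (the helper name is illustrative):
constexpr int SliceExtent(int begin, int end, int stride) { return (end - begin + stride - 1) / stride; }
static_assert(SliceExtent(0, 2, 1) == 2, "dimension 0 before the shrink");
static_assert(SliceExtent(0, 3, 1) == 3, "dimension 1, kept as the {3} output");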
Example #10
// Generated file (from: avg_pool_float_4.mod.py). Do not edit
void CreateModel(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type2(Type::TENSOR_FLOAT32, {5, 11, 13, 3});
  OperandType type0(Type::TENSOR_FLOAT32, {5, 52, 60, 3});
  // Phase 1, operands
  auto i0 = model->addOperand(&type0);
  auto stride = model->addOperand(&type1);
  auto filter = model->addOperand(&type1);
  auto padding = model->addOperand(&type1);
  auto relu6_activation = model->addOperand(&type1);
  auto output = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t stride_init[] = {5};
  model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
  static int32_t filter_init[] = {100};
  model->setOperandValue(filter, filter_init, sizeof(int32_t) * 1);
  static int32_t padding_init[] = {50};
  model->setOperandValue(padding, padding_init, sizeof(int32_t) * 1);
  static int32_t relu6_activation_init[] = {3};
  model->setOperandValue(relu6_activation, relu6_activation_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_AVERAGE_POOL_2D, {i0, padding, padding, padding, padding, stride, stride, filter, filter, relu6_activation}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {i0},
    {output});
  assert(model->isValid());
}
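For explicit padding the pooled output extent is (in + padBefore + padAfter - filter) / stride + 1 (floor division), which is how the {5, 52, 60, 3} input becomes {5, 11, 13, 3} with a 100x100 filter, padding 50 on every edge and stride 5; relu6_activation = 3 fuses a RELU6. A quick check (the helper name is illustrative):
constexpr int PooledExtent(int in, int padBefore, int padAfter, int filter, int stride) {
  return (in + padBefore + padAfter - filter) / stride + 1;
}
static_assert(PooledExtent(52, 50, 50, 100, 5) == 11, "output height");
static_assert(PooledExtent(60, 50, 50, 100, 5) == 13, "output width");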
Example #11
// Generated file (from: conv_1_h3_w2_SAME.mod.py). Do not edit
void CreateModel(Model *model) {
  OperandType type0(Type::INT32, {});
  OperandType type3(Type::TENSOR_FLOAT32, {1, 3, 2, 3});
  OperandType type2(Type::TENSOR_FLOAT32, {1, 8, 8, 1});
  OperandType type1(Type::TENSOR_FLOAT32, {1, 8, 8, 3});
  OperandType type4(Type::TENSOR_FLOAT32, {1});
  // Phase 1, operands
  auto b4 = model->addOperand(&type0);
  auto b5 = model->addOperand(&type0);
  auto b6 = model->addOperand(&type0);
  auto b7 = model->addOperand(&type0);
  auto op2 = model->addOperand(&type1);
  auto op3 = model->addOperand(&type2);
  auto op0 = model->addOperand(&type3);
  auto op1 = model->addOperand(&type4);
  // Phase 2, operations
  static int32_t b4_init[] = {1};
  model->setOperandValue(b4, b4_init, sizeof(int32_t) * 1);
  static int32_t b5_init[] = {1};
  model->setOperandValue(b5, b5_init, sizeof(int32_t) * 1);
  static int32_t b6_init[] = {1};
  model->setOperandValue(b6, b6_init, sizeof(int32_t) * 1);
  static int32_t b7_init[] = {0};
  model->setOperandValue(b7, b7_init, sizeof(int32_t) * 1);
  static float op0_init[] = {-0.966213f, -0.467474f, -0.82203f, -0.579455f, 0.0278809f, -0.79946f, -0.684259f, 0.563238f, 0.37289f, 0.738216f, 0.386045f, -0.917775f, 0.184325f, -0.270568f, 0.82236f, 0.0973683f, -0.941308f, -0.144706f};
  model->setOperandValue(op0, op0_init, sizeof(float) * 18);
  static float op1_init[] = {0.0f};
  model->setOperandValue(op1, op1_init, sizeof(float) * 1);
  model->addOperation(ANEURALNETWORKS_CONV_2D, {op2, op0, op1, b4, b5, b6, b7}, {op3});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op2},
    {op3});
  assert(model->isValid());
}
// Generated file (from: max_pool_float_3_relaxed.mod.py). Do not edit
void CreateModel(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type2(Type::TENSOR_FLOAT32, {5, 2, 3, 3});
  OperandType type0(Type::TENSOR_FLOAT32, {5, 50, 70, 3});
  // Phase 1, operands
  auto i0 = model->addOperand(&type0);
  auto stride = model->addOperand(&type1);
  auto filter = model->addOperand(&type1);
  auto padding = model->addOperand(&type1);
  auto relu6_activation = model->addOperand(&type1);
  auto output = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t stride_init[] = {20};
  model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
  static int32_t filter_init[] = {20};
  model->setOperandValue(filter, filter_init, sizeof(int32_t) * 1);
  static int32_t padding_init[] = {0};
  model->setOperandValue(padding, padding_init, sizeof(int32_t) * 1);
  static int32_t relu6_activation_init[] = {3};
  model->setOperandValue(relu6_activation, relu6_activation_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_MAX_POOL_2D, {i0, padding, padding, padding, padding, stride, stride, filter, filter, relu6_activation}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {i0},
    {output});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}
Example #13
// Generated file (from: max_pool_quant8_2.mod.py). Do not edit
void CreateModel(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {5, 2, 3, 3}, 0.5f, 0);
  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {5, 50, 70, 3}, 0.5f, 0);
  // Phase 1, operands
  auto i0 = model->addOperand(&type0);
  auto stride = model->addOperand(&type1);
  auto filter = model->addOperand(&type1);
  auto padding = model->addOperand(&type1);
  auto activation = model->addOperand(&type1);
  auto output = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t stride_init[] = {20};
  model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
  static int32_t filter_init[] = {20};
  model->setOperandValue(filter, filter_init, sizeof(int32_t) * 1);
  static int32_t padding_init[] = {0};
  model->setOperandValue(padding, padding_init, sizeof(int32_t) * 1);
  static int32_t activation_init[] = {0};
  model->setOperandValue(activation, activation_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_MAX_POOL_2D, {i0, padding, padding, padding, padding, stride, stride, filter, filter, activation}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {i0},
    {output});
  assert(model->isValid());
}
Example #14
TGraph *gr21Dspline_tanB(RooSplineND *spline, RooRealVar &ldu, RooRealVar &lVu, RooRealVar &kuu, int type, double minNLL, double fixcbma)
{
	TGraph *points = new TGraph();
	int pcounter = 0;

   	double Vldu, VlVu, Vkuu; // holders for the values

	
	for (double th=0.001; th<=10;th+=0.1){

		 double x = fixcbma;
		 double y = TMath::Tan(th);  // x irrelevant in grid search

		 if (type==1)type1(x, y, &Vldu, &VlVu, &Vkuu);
		 if (type==2)type2(x, y, &Vldu, &VlVu, &Vkuu);
          	 ldu.setVal(Vldu);
          	 lVu.setVal(VlVu);
          	 kuu.setVal(Vkuu);

	  	 double val = 2*spline->getVal() - minNLL;
		 points->SetPoint(pcounter,val,y);
		 pcounter++;
	}
	points->GetYaxis()->SetTitle("tan(#beta)");
	points->GetXaxis()->SetTitle("-2#Delta Log(L)");
	return points;

}
Example #15
#include <stdio.h>

int main()
{
    int n;
    scanf("%d",&n) ;
    if(n<=3) printf("NO\n") ;
    else if(n%2==0)
    {
        printf("YES\n") ;
        type1() ;
        for(int i=3;i<=n/2;i++)
        {
            printf("%d - %d = 1\n",2*i,2*i-1) ;
            printf("24 * 1 = 24\n") ;
        }
    }
    else if(n%2==1)
    {
        printf("YES\n") ;
        type2() ;
        for(int i=3;2*i+1<=n;i++)
        {
            printf("%d - %d = 1\n",2*i+1,2*i) ;
            printf("24 * 1 = 24\n") ;
        }
    }
}
Example #16
// Generated file (from: rnn_state_relaxed.mod.py). Do not edit
void CreateModel(Model *model) {
  OperandType type5(Type::INT32, {});
  OperandType type2(Type::TENSOR_FLOAT32, {16, 16});
  OperandType type1(Type::TENSOR_FLOAT32, {16, 8});
  OperandType type3(Type::TENSOR_FLOAT32, {16});
  OperandType type4(Type::TENSOR_FLOAT32, {2, 16});
  OperandType type0(Type::TENSOR_FLOAT32, {2, 8});
  // Phase 1, operands
  auto input = model->addOperand(&type0);
  auto weights = model->addOperand(&type1);
  auto recurrent_weights = model->addOperand(&type2);
  auto bias = model->addOperand(&type3);
  auto hidden_state_in = model->addOperand(&type4);
  auto activation_param = model->addOperand(&type5);
  auto hidden_state_out = model->addOperand(&type4);
  auto output = model->addOperand(&type4);
  // Phase 2, operations
  static int32_t activation_param_init[] = {1};
  model->setOperandValue(activation_param, activation_param_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_RNN, {input, weights, recurrent_weights, bias, hidden_state_in, activation_param}, {hidden_state_out, output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input, weights, recurrent_weights, bias, hidden_state_in},
    {hidden_state_out, output});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}
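The RNN model above is the standard basic cell: output and hidden_state_out are activation(input x weights^T + hidden_state_in x recurrent_weights^T + bias), with activation_param = 1 selecting a fused RELU. A minimal reference sketch of one step for the shapes used here (batch 2, input size 8, 16 units), assuming that documented semantics; it is not the NNAPI implementation:
#include <algorithm>
#include <cstddef>
#include <vector>

// Reference sketch only. input: [batch, inputSize], weights: [numUnits, inputSize],
// recurrent: [numUnits, numUnits], bias: [numUnits], stateIn: [batch, numUnits].
std::vector<float> RnnStepSketch(const std::vector<float>& input,
                                 const std::vector<float>& weights,
                                 const std::vector<float>& recurrent,
                                 const std::vector<float>& bias,
                                 const std::vector<float>& stateIn,
                                 std::size_t batch, std::size_t inputSize, std::size_t numUnits) {
  std::vector<float> out(batch * numUnits);
  for (std::size_t b = 0; b < batch; ++b) {
    for (std::size_t u = 0; u < numUnits; ++u) {
      float acc = bias[u];
      for (std::size_t i = 0; i < inputSize; ++i)
        acc += input[b * inputSize + i] * weights[u * inputSize + i];
      for (std::size_t v = 0; v < numUnits; ++v)
        acc += stateIn[b * numUnits + v] * recurrent[u * numUnits + v];
      out[b * numUnits + u] = std::max(acc, 0.0f);  // activation_param 1 == fused RELU
    }
  }
  return out;  // written to both output and hidden_state_out
}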
// Generated file (from: conv_quant8_large_weights_as_inputs.mod.py). Do not edit
void CreateModel(Model *model) {
  OperandType type3(Type::INT32, {});
  OperandType type2(Type::TENSOR_INT32, {3}, 0.25, 0);
  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3, 3}, 0.5, 0);
  OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3, 3}, 1.0, 0);
  OperandType type1(Type::TENSOR_QUANT8_ASYMM, {3, 1, 1, 3}, 0.5, 0);
  // Phase 1, operands
  auto op1 = model->addOperand(&type0);
  auto op2 = model->addOperand(&type1);
  auto op3 = model->addOperand(&type2);
  auto pad0 = model->addOperand(&type3);
  auto act = model->addOperand(&type3);
  auto stride = model->addOperand(&type3);
  auto op4 = model->addOperand(&type4);
  // Phase 2, operations
  static int32_t pad0_init[] = {0};
  model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
  static int32_t act_init[] = {0};
  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
  static int32_t stride_init[] = {1};
  model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1, op2, op3},
    {op4});
  assert(model->isValid());
}
Example #18
// Generated file (from: lstm2_relaxed.mod.py). Do not edit
void CreateModel(Model *model) {
  OperandType type8(Type::FLOAT32, {});
  OperandType type7(Type::INT32, {});
  OperandType type5(Type::TENSOR_FLOAT32, {0, 0});
  OperandType type3(Type::TENSOR_FLOAT32, {0});
  OperandType type9(Type::TENSOR_FLOAT32, {1, 12});
  OperandType type0(Type::TENSOR_FLOAT32, {1, 2});
  OperandType type6(Type::TENSOR_FLOAT32, {1, 4});
  OperandType type1(Type::TENSOR_FLOAT32, {4, 2});
  OperandType type2(Type::TENSOR_FLOAT32, {4, 4});
  OperandType type4(Type::TENSOR_FLOAT32, {4});
  // Phase 1, operands
  auto input = model->addOperand(&type0);
  auto input_to_input_weights = model->addOperand(&type1);
  auto input_to_forget_weights = model->addOperand(&type1);
  auto input_to_cell_weights = model->addOperand(&type1);
  auto input_to_output_weights = model->addOperand(&type1);
  auto recurrent_to_intput_weights = model->addOperand(&type2);
  auto recurrent_to_forget_weights = model->addOperand(&type2);
  auto recurrent_to_cell_weights = model->addOperand(&type2);
  auto recurrent_to_output_weights = model->addOperand(&type2);
  auto cell_to_input_weights = model->addOperand(&type3);
  auto cell_to_forget_weights = model->addOperand(&type4);
  auto cell_to_output_weights = model->addOperand(&type4);
  auto input_gate_bias = model->addOperand(&type4);
  auto forget_gate_bias = model->addOperand(&type4);
  auto cell_gate_bias = model->addOperand(&type4);
  auto output_gate_bias = model->addOperand(&type4);
  auto projection_weights = model->addOperand(&type5);
  auto projection_bias = model->addOperand(&type3);
  auto output_state_in = model->addOperand(&type6);
  auto cell_state_in = model->addOperand(&type6);
  auto activation_param = model->addOperand(&type7);
  auto cell_clip_param = model->addOperand(&type8);
  auto proj_clip_param = model->addOperand(&type8);
  auto scratch_buffer = model->addOperand(&type9);
  auto output_state_out = model->addOperand(&type6);
  auto cell_state_out = model->addOperand(&type6);
  auto output = model->addOperand(&type6);
  // Phase 2, operations
  static int32_t activation_param_init[] = {4};
  model->setOperandValue(activation_param, activation_param_init, sizeof(int32_t) * 1);
  static float cell_clip_param_init[] = {0.0f};
  model->setOperandValue(cell_clip_param, cell_clip_param_init, sizeof(float) * 1);
  static float proj_clip_param_init[] = {0.0f};
  model->setOperandValue(proj_clip_param, proj_clip_param_init, sizeof(float) * 1);
  model->addOperation(ANEURALNETWORKS_LSTM, {input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_intput_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, output_state_in, cell_state_in, activation_param, cell_clip_param, proj_clip_param}, {scratch_buffer, output_state_out, cell_state_out, output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_intput_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, output_state_in, cell_state_in},
    {scratch_buffer, output_state_out, cell_state_out, output});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}
Example #19
static jstring
getExternalStoragePublicDirectory(JNIEnv *env, const char *type)
{
    if (Environment::getExternalStoragePublicDirectory_method == NULL)
        /* needs API level 8 */
        return NULL;

    Java::String type2(env, type);
    jobject file = env->CallStaticObjectMethod(Environment::cls,
                   Environment::getExternalStoragePublicDirectory_method,
                   type2.Get());
    return ToAbsolutePathChecked(env, file);
}
Example #20
int main()
{
	type1();
	printf("\n");
	type2();
	printf("\n");
	type3();
	printf("\n");
	type4();
	printf("\n");
	type5();
	printf("\n");
	rectange_interview();
	return 0;
}
Example #21
void SettingDlg::setOcList()
{
    QString type0 = lng->tr("FT_CLOSE_OC");
    QString type1("399");
    QString type2("532");

    occb->insertItem(type0, 0);
    occb->insertItem(type1, 1);
    occb->insertItem(type2, 2);

    if(cfg->cpuLockType == "0")
        occb->setCurrentItem(0);
    else if(cfg->cpuLockType == "399")
        occb->setCurrentItem(1);
    else
        occb->setCurrentItem(2);
}
// Generated file (from: space_to_depth_quant8_2.mod.py). Do not edit
void CreateModel(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.5f, 0);
  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 1}, 0.5f, 0);
  // Phase 1, operands
  auto input = model->addOperand(&type0);
  auto radius = model->addOperand(&type1);
  auto output = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t radius_init[] = {2};
  model->setOperandValue(radius, radius_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_SPACE_TO_DEPTH, {input, radius}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input},
    {output});
  assert(model->isValid());
}
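SPACE_TO_DEPTH with block size 2 folds each 2x2 spatial patch into the channel dimension (the operand is named radius in this generated file, but it is the block size), so the {1, 4, 4, 1} input becomes {1, 4/2, 4/2, 1*2*2} = {1, 2, 2, 4}, matching type2 above. A quick check (helper names are illustrative):
constexpr int SpatialOut(int extent, int block) { return extent / block; }
constexpr int DepthOut(int channels, int block) { return channels * block * block; }
static_assert(SpatialOut(4, 2) == 2, "output height and width");
static_assert(DepthOut(1, 2) == 4, "output depth");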
// Generated file (from: depth_to_space_float_2.mod.py). Do not edit
void CreateModel(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 4});
  OperandType type2(Type::TENSOR_FLOAT32, {1, 4, 4, 1});
  // Phase 1, operands
  auto input = model->addOperand(&type0);
  auto block_size = model->addOperand(&type1);
  auto output = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t block_size_init[] = {2};
  model->setOperandValue(block_size, block_size_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_DEPTH_TO_SPACE, {input, block_size}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input},
    {output});
  assert(model->isValid());
}
Example #24
// Generated file (from: mul_quant8.mod.py). Do not edit
void CreateModel(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {2}, 1.0, 0);
  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {2}, 2.0, 0);
  // Phase 1, operands
  auto op1 = model->addOperand(&type0);
  auto op2 = model->addOperand(&type0);
  auto act = model->addOperand(&type1);
  auto op3 = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t act_init[] = {0};
  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_MUL, {op1, op2, act}, {op3});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1, op2},
    {op3});
  assert(model->isValid());
}
Example #25
int area(int type)
{
    int maxh, maxw;

    switch(type) {
    /*case 0 : maxw=dane[0].w+dane[1].w+dane[2].w+dane[3].w;
             maxh=max(dane[0].h,dane[1].h,dane[2].h,dane[3].h);
    	   return maxw*maxh;*/
    case 0 :
        return type0();
    case 1 :
        return type1();
    case 2 :
        return type2();
    case 3 :
        return type3();
    case 4 :
        return type4();
    default :
        return 0;   // guard: no matching layout type
    }
}
Example #26
// Generated file (from: concat_float_2.mod.py). Do not edit
void CreateModel(Model *model) {
  OperandType type2(Type::INT32, {});
  OperandType type1(Type::TENSOR_FLOAT32, {40, 230});
  OperandType type0(Type::TENSOR_FLOAT32, {52, 230});
  OperandType type3(Type::TENSOR_FLOAT32, {92, 230});
  // Phase 1, operands
  auto input1 = model->addOperand(&type0);
  auto input2 = model->addOperand(&type1);
  auto axis0 = model->addOperand(&type2);
  auto output = model->addOperand(&type3);
  // Phase 2, operations
  static int32_t axis0_init[] = {0};
  model->setOperandValue(axis0, axis0_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_CONCATENATION, {input1, input2, axis0}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input1, input2},
    {output});
  assert(model->isValid());
}
// Generated file (from: space_to_batch_float_2.mod.py). Do not edit
void CreateModel(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 5, 2, 1});
  OperandType type3(Type::TENSOR_FLOAT32, {6, 2, 2, 1});
  OperandType type2(Type::TENSOR_INT32, {2, 2});
  OperandType type1(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto input = model->addOperand(&type0);
  auto block_size = model->addOperand(&type1);
  auto paddings = model->addOperand(&type2);
  auto output = model->addOperand(&type3);
  // Phase 2, operations
  static int32_t block_size_init[] = {3, 2};
  model->setOperandValue(block_size, block_size_init, sizeof(int32_t) * 2);
  static int32_t paddings_init[] = {1, 0, 2, 0};
  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 4);
  model->addOperation(ANEURALNETWORKS_SPACE_TO_BATCH_ND, {input, block_size, paddings}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input},
    {output});
  assert(model->isValid());
}
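SPACE_TO_BATCH_ND first applies the paddings ({before, after} per spatial dimension: 1+0 rows, 2+0 columns) and then moves block_size factors into the batch: {1, 5, 2, 1} pads to {1, 6, 4, 1} and reshapes to {1*3*2, 6/3, 4/2, 1} = {6, 2, 2, 1}, matching type3. A quick check (the helper name is illustrative):
constexpr int PaddedExtent(int in, int before, int after) { return in + before + after; }
static_assert(PaddedExtent(5, 1, 0) / 3 == 2, "output height");
static_assert(PaddedExtent(2, 2, 0) / 2 == 2, "output width");
static_assert(1 * 3 * 2 == 6, "output batch");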
Example #28
type_t foo(const type_t &type)
{
	type_t type2(type);

	return type2;
}
Example #29
void makeLikelihoodRotation(std::string inname, std::string outname, double SMOOTH, bool isAsimov=false){

   gSystem->Load("libHiggsAnalysisCombinedLimit.so");
   //TFile *fi = TFile::Open("lduscan_neg_ext/3D/lduscan_neg_ext_3D.root");
   //TFile *fi = TFile::Open("lduscan_neg_ext_2/exp3D/lduscan_neg_ext_2_exp3D.root");
   TFile *fi = TFile::Open(inname.c_str());
   TTree *tree = (TTree*)fi->Get("limit");
   //TTree *tree = new TTree("tree_vals","tree_vals");  

   // ------------------------------ THIS IS WHERE WE BUILD THE SPLINE ------------------------ //
   // Create 2 Real-vars, one for each of the parameters of the spline 
   // The variables MUST be named the same as the corresponding branches in the tree
   //
   RooRealVar ldu("lambda_du","lambda_du",0.1,-2.5,2.5); 
   RooRealVar lVu("lambda_Vu","lambda_Vu",0.1,0,2.2);
   RooRealVar kuu("kappa_uu","kappa_uu",0.1,0,2.2);
   
   RooSplineND *spline = new RooSplineND("spline","spline",RooArgList(ldu,lVu,kuu),tree,"deltaNLL",SMOOTH,true,"deltaNLL >= 0 && deltaNLL < 500 && ( (TMath::Abs(quantileExpected)!=1 && TMath::Abs(quantileExpected)!=0) || (Entry$==0) )");
   // ----------------------------------------------------------------------------------------- //
   
   //TGraph *gr = spline->getGraph("x",0.1); // Return 1D graph. Will be a slice of the spline for fixed y generated at steps of 0.1
   TFile *fOut = new TFile(outname.c_str(),"RECREATE");

   // Plot the 2D spline 

   /*
   TGraph2D *gcvcf = new TGraph2D(); gcvcf->SetName("cvcf");
   TGraph2D *gcvcf_kuu = new TGraph2D(); gcvcf_kuu->SetName("cvcf_kuu");
   TGraph2D *gcvcf_lVu = new TGraph2D(); gcvcf_lVu->SetName("cvcf_lVu");
   */
   TGraph2D *type1_minscan = new TGraph2D(); 
   type1_minscan->SetName("type1_minscan");
   TGraph2D *type2_minscan = new TGraph2D(); 
   type2_minscan->SetName("type2_minscan");

   TGraph2D *gr_ldu 		= new TGraph2D(); gr_ldu->SetName("t1_ldu");
   TGraph2D *gr_lVu 		= new TGraph2D(); gr_lVu->SetName("t1_lVu");
   TGraph2D *gr_kuu 		= new TGraph2D(); gr_kuu->SetName("t1_kuu");
   TGraph2D *gr2_ldu		= new TGraph2D(); gr2_ldu->SetName("t2_ldu");
   TGraph2D *gr2_lVu 		= new TGraph2D(); gr2_lVu->SetName("t2_lVu");
   TGraph2D *gr2_kuu 		= new TGraph2D(); gr2_kuu->SetName("t2_kuu");

   TGraph2D *gr_ku 		= new TGraph2D(); gr_ku->SetName("t1_ku");
   TGraph2D *gr_kd 		= new TGraph2D(); gr_kd->SetName("t1_kd");
   TGraph2D *gr_kV 		= new TGraph2D(); gr_kV->SetName("t1_kV");

   TGraph2D *gr2_ku 		= new TGraph2D(); gr2_ku->SetName("t2_ku");
   TGraph2D *gr2_kd 		= new TGraph2D(); gr2_kd->SetName("t2_kd");
   TGraph2D *gr2_kV 		= new TGraph2D(); gr2_kV->SetName("t2_kV");

   TGraph2D *gr_beta		= new TGraph2D(); gr_beta->SetName("beta");
   TGraph2D *gr_bma		= new TGraph2D(); gr_bma->SetName("beta_minis_alpha");
   // check the values of the three parameters during the scan ?!


   double Vldu, VlVu, Vkuu; // holders for the values
   int pt1,pt2 = 0;

   double mint2 = 10000;
   double mint1 = 10000;
   double mint1_x = 10000;
   double mint1_y = 10000;
   double mint2_x = 10000;
   double mint2_y = 10000;

   double mint1_lVu = 10000;
   double mint1_ldu = 10000;
   double mint1_kuu = 10000;

   double mint2_lVu = 10000;
   double mint2_ldu = 10000;
   double mint2_kuu = 10000;

   int ccounter = 0;

   double Vku, Vkd, VkV; 

   TGraph2D *g_FFS = new TGraph2D(); g_FFS->SetName("ffs_ldu_1");
   int pt=0;
   for (double x=0.;x<=3.0;x+=0.05){
     for (double y=0.;y<=3.0;y+=0.05){
	ldu.setVal(1);
	lVu.setVal(y);
	kuu.setVal(x);
	double dnll2 = 2*spline->getVal();
	g_FFS->SetPoint(pt,x,y,dnll2);
	pt++;
     }
   }

   if (!isAsimov){

    double Vbma, Vbeta; 

    for (double cbma=-0.8;cbma<0.8;cbma+=0.01){
     for (double b=0.1;b<1.4;b+=0.05){
        double tanb = TMath::Tan(b);

	getAngles(cbma,tanb,&Vbeta,&Vbma);

	type1(cbma, tanb, &Vldu, &VlVu, &Vkuu);
	type1_ex(cbma, tanb, &Vku, &Vkd, &VkV);

	if (Vldu > ldu.getMax() || Vldu < ldu.getMin()) {
        	type1_minscan->SetPoint(ccounter,cbma,tanb,10);
	}
	if (VlVu > lVu.getMax() || VlVu < lVu.getMin()) {
        	type1_minscan->SetPoint(ccounter,cbma,tanb,10);
	}
	if (Vkuu > kuu.getMax() || Vkuu < kuu.getMin()) {
        	type1_minscan->SetPoint(ccounter,cbma,tanb,10);
	} else {
         ldu.setVal(Vldu);lVu.setVal(VlVu);kuu.setVal(Vkuu);
	 double dnll2 = 2*spline->getVal();
	 if (dnll2 < mint1) { 
		mint1_x = cbma;
		mint1_y = tanb;
		mint1 = dnll2;

		mint1_lVu = VlVu; 
		mint1_kuu = Vkuu;
		mint1_ldu = Vldu;
	 }
	 type1_minscan->SetPoint(ccounter,cbma,tanb,dnll2);
	}
	//std::cout << " Checking point cbma,tanb -> ldu, lVu, kuu == 2DeltaNLL " << cbma << ", " << tanb << " --> " << Vldu << ", " << VlVu << ", " << Vkuu << " == " << dnll2 << std::endl;
        gr_ldu->SetPoint(ccounter,cbma,tanb,Vldu);
        gr_lVu->SetPoint(ccounter,cbma,tanb,VlVu);
        gr_kuu->SetPoint(ccounter,cbma,tanb,Vkuu);

        gr_ku->SetPoint(ccounter,cbma,tanb,Vku);
        gr_kd->SetPoint(ccounter,cbma,tanb,Vkd);
        gr_kV->SetPoint(ccounter,cbma,tanb,VkV);

	
	type2(cbma, tanb, &Vldu, &VlVu, &Vkuu);
	type2_ex(cbma, tanb, &Vku, &Vkd, &VkV);

	if (Vldu > ldu.getMax() || Vldu < ldu.getMin()) {
        	type2_minscan->SetPoint(ccounter,cbma,tanb,10);
	}
	if (VlVu > lVu.getMax() || VlVu < lVu.getMin()) {
        	type2_minscan->SetPoint(ccounter,cbma,tanb,10);
	}
	if (Vkuu > kuu.getMax() || Vkuu < kuu.getMin()) {
        	type2_minscan->SetPoint(ccounter,cbma,tanb,10);
	} else {
         ldu.setVal(Vldu);lVu.setVal(VlVu);kuu.setVal(Vkuu);
	 double dnll2 = 2*spline->getVal();
	 if (dnll2 < mint2) {
		mint2_x = cbma;
		mint2_y = tanb;
		mint2 = dnll2;

		mint2_lVu = VlVu; 
		mint2_kuu = Vkuu;
		mint2_ldu = Vldu;
	 }
	 type2_minscan->SetPoint(ccounter,cbma,tanb,dnll2);
	}
        gr2_ldu->SetPoint(ccounter,cbma,tanb,Vldu);
        gr2_lVu->SetPoint(ccounter,cbma,tanb,VlVu);
        gr2_kuu->SetPoint(ccounter,cbma,tanb,Vkuu);

        gr2_ku->SetPoint(ccounter,cbma,tanb,Vku);
        gr2_kd->SetPoint(ccounter,cbma,tanb,Vkd);
        gr2_kV->SetPoint(ccounter,cbma,tanb,VkV);

	gr_beta->SetPoint(ccounter,cbma,tanb,Vbeta);
	gr_bma->SetPoint(ccounter,cbma,tanb,Vbma);

	ccounter++;
     }
    }
    std::cout << "T1 Minimum found at " << mint1_x << "," << mint1_y << "( or in lVu, kuu, ldu) = " << mint1_lVu << ", " << mint1_kuu << ", " << mint1_ldu  << ", val=" << mint1 << std::endl;
    std::cout << "T2 Minimum found at " << mint2_x << "," << mint2_y << "( or in lVu, kuu, ldu) = " << mint2_lVu << ", " << mint2_kuu << ", " << mint2_ldu  <<", val=" << mint2 << std::endl;
   }
   else { // Probably then use the Asimov
		
         ldu.setVal(1);lVu.setVal(1);kuu.setVal(1);
	 double dnll2 = 2*spline->getVal();
	 mint1 = dnll2;
	 mint2 = dnll2;	
   }


   
   TGraph *type1_0p1 = (TGraph*)gr21Dspline_tanB(spline, ldu, lVu, kuu, 1, mint1, 0.1);
   TGraph *type2_0p1 = (TGraph*)gr21Dspline_tanB(spline, ldu, lVu, kuu, 2, mint2, 0.1);


   type1_0p1->SetName("type1_cbma0p1");
   type2_0p1->SetName("type2_cbma0p1");
   type1_0p1->Write();
   type2_0p1->Write();

   TGraph * gr_type1 = (TGraph*)gr2contour(spline, ldu, lVu, kuu, 1, 5.99, mint1, 0, 1., 0.01, 0.001);
   TGraph * gr_type2 = (TGraph*)gr2contour(spline, ldu, lVu, kuu, 2, 5.99, mint2, 0, 1., 0.01, 0.001);

   gr_type1->SetName("type1");
   gr_type2->SetName("type2");
   gr_type1->Write();
   gr_type2->Write();

   //gr_type1->Draw("p"); 
   fOut->cd(); 


   type1_minscan->Write();
   type2_minscan->Write();

   gr_ldu->SetMinimum(-2.5);  gr_ldu->SetMaximum(2.5); 
   gr_lVu->SetMinimum(0)   ;  gr_lVu->SetMaximum(3); 
   gr_kuu->SetMinimum(0)   ;  gr_kuu->SetMaximum(3);

   gr2_ldu->SetMinimum(-2.5);  gr2_ldu->SetMaximum(2.5); 
   gr2_lVu->SetMinimum(0)   ;  gr2_lVu->SetMaximum(3); 
   gr2_kuu->SetMinimum(0)   ;  gr2_kuu->SetMaximum(3); 

   gr_ldu->Write(); gr_lVu->Write(); gr_kuu->Write();
   gr2_ldu->Write(); gr2_lVu->Write(); gr2_kuu->Write();

   gr_ku->Write(); 
   gr_kd->Write(); 
   gr_kV->Write(); 
      
   gr2_ku->Write();
   gr2_kd->Write();
   gr2_kV->Write();

   g_FFS->Write();

   gr_beta->Write();
   gr_bma->Write();

   std::cout << "Saved stuff to -> " << fOut->GetName() << std::endl; 
   fOut->Close();
}
Example #30
TGraph * gr2contour(RooSplineND *spline, RooRealVar &ldu, RooRealVar &lVu, RooRealVar &kuu, int type, double level, double minNLL, double best_x, double best_y, double step_r, double step_th)
{

	TGraph *points = new TGraph();
	int pcounter = 0;

   	double Vldu, VlVu, Vkuu; // holders for the values

        // Define 0 as the +ve Y-axis
	std::cout << " Centered at (type) " << best_x << ", " << best_y << ", " << type << std::endl;
	std::cout << " Minimum assumed to be " << minNLL << std::endl;
	TGraph *thepoints = new TGraph();
	int pointcounter =0 ;

	//for (double th=0; th<2*TMath::Pi();th+=step_th){
	for (double th=0.001; th<=10;th+=step_th){
	//for (double th=5.; th<6.;th+=0.2){	

		double r_pre_level=-1;
		double val_pre_level=-1;

		bool iscontained=true;
		//double r=0.05;
		double r=-0.99;

		bool invertLogic=false;
		while (iscontained){

		 double x = get_x(r,th,best_x);
		 double y = get_y(r,th,best_y);

		 if (x > 1 || y > 10 || x < -1 || y < 0.0001 ){
			iscontained=false;
			break;
		 }

   		 // x = cos(b-a) and y=tanb
		 if (type==1)type1(x, y, &Vldu, &VlVu, &Vkuu);
		 if (type==2)type2(x, y, &Vldu, &VlVu, &Vkuu);
		 double val = 1000;
		 if ( Vldu < ldu.getMax() && Vldu > ldu.getMin() && VlVu < lVu.getMax() && VlVu > lVu.getMin() && Vkuu < kuu.getMax() && Vkuu > kuu.getMin() ){

          	   ldu.setVal(Vldu);
          	   lVu.setVal(VlVu);
          	   kuu.setVal(Vkuu);

	  	   val = 2*spline->getVal() - minNLL;
		 }

		 if ( (invertLogic && val<level) || ( (!invertLogic) && val>level) ){
			double ave_r = interp(r,val,r_pre_level,val_pre_level,level);  // Do a better interpolation later
			if (get_x(ave_r,th,best_x) > -0.89 && get_x(ave_r,th,best_x) < 0.89 && y>0.05 && y < 11){
			  points->SetPoint(pcounter,get_x(ave_r,th,best_x),get_y(ave_r,th,best_y));
			  pcounter++;
			  //break;
			  //std::cout << " Oh I found a cheeky point!  x, y=" << get_x(ave_r,th,best_x) << ", " << get_y(ave_r,th,best_y) << ", (ave_r,th, r,r_pre=)" << ave_r << ", " << th << " avarage from " << r << ", " <<  r_pre_level << " (value,val_pre) == " << val << ", " << val_pre_level <<  std::endl;
			}
			invertLogic=(!invertLogic);
		 //} else {
		 }
		 r_pre_level = r;
		 val_pre_level = val;
		 //}
		 thepoints->SetPoint(pointcounter,get_x(r,th,best_x),get_y(r,th,best_y));
		 pointcounter++;

		 r+=step_r;
		}
	}

	points->SetMarkerStyle(20);
	points->SetMarkerSize(0.5);

        thepoints->SetMarkerColor(kRed);
	//thepoints->Draw("AP");
	//points->Draw("apL");

	return points;
}