コード例 #1
0
ファイル: libbrctl.c プロジェクト: MichaelQQ/Quagga-PE
/* Show STP information for bridge `brname` via the brctl action dispatcher.
 * Returns the result code of brctl_action(). */
int showstp_cmd(const char* brname){
	int res;
	alloc_args();
	/* BUG FIX: this previously pushed "showmacs" (copy-paste from the
	 * showmacs command); the verb matching this function is "showstp". */
	set_arg("showstp");
	sprintf(buffer, "%s", brname);
	set_arg(buffer);
	res = brctl_action(arg_index, args);
	free_args();
	return res;
}
コード例 #2
0
ファイル: search_n.hpp プロジェクト: Soledad89/compute
    // Enqueue the search_n kernel over [0, m_count); a zero-length range
    // is a no-op and yields a null event.
    event exec(command_queue &queue)
    {
        if(!m_count) {
            return event();
        }

        // Bind the kernel arguments before launching.
        set_arg(m_value_arg, m_value);
        set_arg(m_n_arg, uint_(m_n));

        return exec_1d(queue, 0, m_count);
    }
コード例 #3
0
ファイル: libbrctl.c プロジェクト: MichaelQQ/Quagga-PE
/* Delete bridge `brname` by running the "delbr" brctl action.
 * Returns the brctl_action() result code. */
int delbr_cmd(const char *brname){
	printf("DEBUG(BRCTLD):delbr_cmd enter.\n");
	alloc_args();
	set_arg("delbr");
	/* Copy the name into the shared argument buffer before registering it. */
	sprintf(buffer, "%s", brname);
	set_arg(buffer);
	const int res = brctl_action(arg_index, args);
	free_args();
	printf("DEBUG(BRCTLD):delbr_cmd exit. res:%d.\n",res);
	return res;
}
コード例 #4
0
ファイル: libbrctl.c プロジェクト: MichaelQQ/Quagga-PE
/* Set the MAC ageing time (seconds) on bridge `brname` via "setageing".
 * Returns the brctl_action() result code. */
int setageing_cmd(const char* brname,int timer){
	printf("DEBUG(BRCTLD):setageing_cmd enter.\n");
	alloc_args();
	set_arg("setageing");
	/* Register bridge name and timer value as the two positional args. */
	sprintf(buffer, "%s", brname);
	set_arg(buffer);
	sprintf(buffer, "%d", timer);
	set_arg(buffer);
	const int res = brctl_action(arg_index, args);
	free_args();
	printf("DEBUG(BRCTLD):setageing_cmd exit. res:%d.\n",res);
	return res;
}
コード例 #5
0
ファイル: ClientApp.cpp プロジェクト: chenbk85/ACE-Middleware
// Parse the command line: -i <ior>, -k <client kind>, -n <client id>.
// Returns 0 on success (after dependency checks), 1 on usage error ('?'),
// -1 on an unknown option, or the set_arg() error code.
int
ClientApp::parse_args(int argc, ACE_TCHAR* argv[])
{
  // Remember the executable name for usage/error reporting.
  this->exe_name_ = argv[0];

  ACE_Get_Opt get_opts(argc, argv, ACE_TEXT("i:k:n:"));

  int opt = 0;

  while ((opt = get_opts()) != -1)
    {
      int status = 0;

      switch (opt)
      {
        case 'i':
          // The IOR string is stored as-is.
          this->ior_ = get_opts.opt_arg();
          break;

        case 'k':
          status = set_arg(this->client_kind_,
                           get_opts.opt_arg(),
                           opt,
                           "client_kind");
          break;

        case 'n':
          status = set_arg(this->client_id_,
                           get_opts.opt_arg(),
                           opt,
                           "client_id");
          break;

        case '?':
          this->usage_statement();
          return 1;

        default:
          this->usage_statement();
          return -1;
      }

      if (status != 0)
        {
          return status;
        }
    }

  // All options consumed; verify the combination is coherent.
  return this->arg_dependency_checks();
}
コード例 #6
0
ファイル: eval.c プロジェクト: doly/femtoutil
/* Evaluate a user-defined function call.
 * `next` points at the cons cell holding the function name; its cdr holds
 * the argument list.  Returns the evaluated result, or 0 when the name is
 * not found in the global function table g_fa. */
int lfunc(cons_t *next)
{
	cons_t *now;
	now = next;
	int fc;

	/* Look up the function slot index by name. */
	fc = get_func(next->cvalue);
	if (g_fa[fc].key == NULL) {
		printf("syntax error\n");
		return 0;
	} else {	
		next = next->cdr;
		/* Bind the call's argument values (starting at position 0). */
		set_arg(next, g_argl, 0);

		int rec = g_fa[fc].rec_num;

			/* Enter a new argument layer while the body evaluates. */
			g_argl++;

			if (rec >= 2) {
				/* NOTE(review): rec_num >= 2 appears to flag recursive
				 * definitions needing rec_cal() pre-processing — confirm. */
				rec_cal(fc);
			}
			now->result = eval(g_fa[fc].exp);

			g_argl--;

		return now->result;
	}
}
コード例 #7
0
void CL_StringFormat::set_arg(int index, long unsigned int value, int min_length)
{
	CL_String t = CL_StringHelp::ull_to_text(value);
	if ((int) t.length() < min_length)
		t = CL_String(min_length-t.length(), '0') + t;
	set_arg(index, t);
}
コード例 #8
0
ファイル: get_args.c プロジェクト: vistalite68/ft_p
/* Scan `str` for double-quoted substrings (ignoring \" escaped quotes) and
 * copy each span into args[] via set_arg().  Returns the number of
 * arguments stored; if set_arg() fails, returns the count stored so far. */
static int		fill_args(char **args, char *str)
{
	size_t		i;
	size_t		arg_nb;
	char		*pos[2];	/* pos[START]/pos[END]: bounds of the current quoted span */

	i = ~0lu;	/* pre-increment below starts the scan at index 0 */
	arg_nb = 0;
	ft_bzero(pos, sizeof(char *) * 2);
	while (str[++i] != '\0')
	{
		/* Only an unescaped double quote delimits an argument. */
		if (str[i] != '"' || ((i > 0 && str[i - 1] == '\\')))
			continue ;
		if (pos[START] == NULL)
			pos[START] = str + i + 1;	/* opening quote: span starts after it */
		else
			pos[END] = str + i;			/* closing quote: span ends before it */
		if (pos[START] == NULL || pos[END] == NULL)
			continue ;
		if (set_arg(&args[arg_nb++], pos[START], pos[END] - pos[START]) < 0)
			return arg_nb - 1;	/* arg_nb was already advanced for the failed slot */
		ft_bzero(pos, sizeof(char *) * 2);	/* reset bounds for the next pair */
	}
	return arg_nb;
}
コード例 #9
0
ファイル: lua.vecmath.c プロジェクト: kidaa/Synthesis
/* Lua binding: mat3_set_row(mat_table, row_index, vec_table).
 * Replaces one row of a 3x3 matrix and writes the result back into the
 * table passed as the first argument.  Returns 0 (no Lua return values). */
static int mat3_set_row(lua_State *L)
{
	t_jit_mat3 m;
	t_jit_vec3 v;
	
	/* Validate the Lua stack: table, number, table. */
	if(! lua_istable(L, 1))
		luaL_error(L, " first argument must be a table");
	
	if(! lua_isnumber(L, 2))
		luaL_error(L, " second argument must be a number");
		
	if(! lua_istable(L, 3))
		luaL_error(L, " third argument must be a table");

	table_to_mat3(L, 1, &m);
	table_to_vec3(L, 3, &v);
	
	jit_mat3_set_row(&m, lua_tointeger(L, 2), &v);
	
	/* Push the mutated matrix as a table, then store it into argument 1. */
	mat3_to_table(L, &m);
	
	set_arg(L, 1);
	lua_pop(L, 1);
	
	return 0;
}
コード例 #10
0
ファイル: simple_training_net.c プロジェクト: zeno40/convnet
/* Create a reorder between user memory and a primitive's preferred format,
 * if their memory descriptors differ.  On a mismatch this allocates
 * *prim_memory, builds a reorder primitive in the requested direction,
 * appends it to net[]/net_args[] at *net_index and advances the index.
 * When the formats already match, *prim_memory and *reorder are set to
 * NULL so callers fall back to the user memory directly. */
mkldnn_status_t prepare_reorder(mkldnn_memory_t *user_memory, /** in */
        const mkldnn_memory_desc_t *prim_memory_md, /** in */
        mkldnn_engine_t prim_engine, /** in: primitive's engine */
        int dir_is_user_to_prim, /** in: user -> prim or prim -> user */
        mkldnn_memory_t *prim_memory, /** out: primitive's memory created */
        mkldnn_primitive_t *reorder, /** out: reorder primitive created */
        uint32_t *
                net_index, /** primitive index in net (inc if reorder created)
                            */
        mkldnn_primitive_t *net, args_t *net_args /** net params */) {
    const mkldnn_memory_desc_t *user_memory_md;
    mkldnn_memory_get_memory_desc(*user_memory, &user_memory_md);

    mkldnn_engine_t user_mem_engine;
    mkldnn_memory_get_engine(*user_memory, &user_mem_engine);

    if (!mkldnn_memory_desc_equal(user_memory_md, prim_memory_md)) {
        CHECK(mkldnn_memory_create(prim_memory, prim_memory_md, prim_engine,
                MKLDNN_MEMORY_ALLOCATE));

        /* Source/destination of the reorder depend on the direction flag. */
        mkldnn_primitive_desc_t reorder_pd;
        if (dir_is_user_to_prim) {
            CHECK(mkldnn_reorder_primitive_desc_create(&reorder_pd,
                    user_memory_md, user_mem_engine, prim_memory_md,
                    prim_engine, NULL));
        } else {
            CHECK(mkldnn_reorder_primitive_desc_create(&reorder_pd,
                    prim_memory_md, prim_engine, user_memory_md,
                    user_mem_engine, NULL));
        }
        CHECK(mkldnn_primitive_create(reorder, reorder_pd));
        CHECK(mkldnn_primitive_desc_destroy(reorder_pd));

        /* Append the reorder and its FROM/TO argument node to the net. */
        net[*net_index] = *reorder;
        prepare_arg_node(&net_args[*net_index], 2);
        set_arg(&net_args[*net_index].args[0], MKLDNN_ARG_FROM,
                dir_is_user_to_prim ? *user_memory : *prim_memory);
        set_arg(&net_args[*net_index].args[1], MKLDNN_ARG_TO,
                dir_is_user_to_prim ? *prim_memory : *user_memory);
        (*net_index)++;
    } else {
        *prim_memory = NULL;
        *reorder = NULL;
    }

    return mkldnn_success;
}
コード例 #11
0
    // Register `image` as a kernel argument (with the given access
    // qualifiers) and return the identifier used for it in kernel source.
    std::string get_image_identifier(const char *qualifiers, const image2d &image)
    {
        const size_t arg_index =
            add_arg_with_qualifiers<image2d>(qualifiers, "image");
        set_arg(arg_index, image);
        return "image";
    }
コード例 #12
0
ファイル: libbrctl.c プロジェクト: MichaelQQ/Quagga-PE
/* Run the bare "show" brctl action (list all bridges).
 * Returns the brctl_action() result code. */
int show_cmd(){
	alloc_args();
	set_arg("show");
	const int res = brctl_action(arg_index, args);
	free_args();
	return res;
}
コード例 #13
0
ファイル: eval.c プロジェクト: doly/femtoutil
/* Evaluate each value in the list `arg_value` and store the results into
 * argument layer `layer + 1` of the global table g_arga, preserving list
 * order.  The recursion happens before the store, so deeper cells are
 * written first; indexing with arg_num - 1 keeps this cell's value at its
 * own position. */
void set_arg(cons_t *arg_value,int layer, int arg_num)
{
	int ans;
	if (arg_value != NULL) {
		ans = get_value(arg_value);
		arg_num++;
		set_arg(arg_value->cdr, layer, arg_num);
		g_arga[layer + 1][arg_num - 1] = ans;
	}
}
コード例 #14
0
ファイル: copy_on_device.hpp プロジェクト: junmuz/compute
    // Enqueue the device-to-device copy kernel; an empty range is a no-op
    // and returns a null event.
    event exec(command_queue &queue)
    {
        if(!m_count){
            return event();
        }

        // Round the launch size up from count / values-per-thread to a
        // multiple of the threads-per-block.
        const size_t work_size = calculate_work_size(m_count, m_vpt, m_tpb);

        set_arg(m_count_arg, uint_(m_count));

        return exec_1d(queue, 0, work_size, m_tpb);
    }
コード例 #15
0
/* Construct the model: seed the RNG, validate and store the init
 * arguments, then zero-allocate every working vector/matrix sized by the
 * training/testing data counts. */
model::model(init_arg *args)
{
    arma::arma_rng::set_seed_random();
    check_arg(*args);   // validate settings (behavior defined in check_arg)
    set_arg(args);      // store the argument struct into member state

    // All buffers start zero-filled; covariance matrices are square over
    // the training set, the rest are per-sample vectors.
    training_mean = make_unique<vec_t>(NUM_TRAINING_DATA, arma::fill::zeros);
    training_cov = make_unique<mat_t>(NUM_TRAINING_DATA, NUM_TRAINING_DATA, arma::fill::zeros);
    chol_cov = make_unique<mat_t>(NUM_TRAINING_DATA, NUM_TRAINING_DATA, arma::fill::zeros);
    mixture_cov = make_unique<vec_t>(NUM_TRAINING_DATA, arma::fill::zeros);
    testing_cov = make_unique<vec_t>(1, arma::fill::zeros);
    alpha = make_unique<vec_t>(NUM_TRAINING_DATA, arma::fill::zeros);
    v = make_unique<vec_t>(NUM_TRAINING_DATA, arma::fill::zeros);
    testing_target = make_unique<vec_t>(NUM_TESTING_DATA, arma::fill::zeros);
    testing_var = make_unique<vec_t>(NUM_TESTING_DATA, arma::fill::zeros);
}
コード例 #16
0
ファイル: eval.c プロジェクト: doly/femtoutil
/* Evaluate a function call addressed by slot index (now->ivalue) rather
 * than by name: bind the arguments from next->cdr, evaluate the stored
 * expression one argument layer deeper, and return the result.
 * NOTE(review): this snapshot calls set_arg with two arguments while the
 * sibling definition takes three — likely from a different revision. */
int rfunc(cons_t *next)
{
	cons_t *now;
	now = next;
	next = next->cdr;
	set_arg(next, g_argl);

	/* Enter a new argument layer while the body evaluates. */
	g_argl++;

	now->result = eval(g_fa[now->ivalue].exp);

	g_argl--;	

	return now->result;

}
コード例 #17
0
ファイル: lua.vecmath.c プロジェクト: kidaa/Synthesis
/* Lua binding: mat4_negate(mat_table).
 * Negates a 4x4 matrix in place, writing the result back into the table
 * passed as the first argument.  Returns 0 (no Lua return values). */
static int mat4_negate(lua_State *L)
{
	t_jit_mat4 m;
	
	if(! lua_istable(L, 1))
		luaL_error(L, "first argument must be a table");

	table_to_mat4(L, 1, &m);
	
	jit_mat4_negate(&m);

	/* Push the negated matrix as a table, then store it into argument 1. */
	mat4_to_table(L, &m);
	
	set_arg(L, 1);
	lua_pop(L, 1);
	
	return 0;
}
コード例 #18
0
/* Feed the current camera frame plus navdata to the Python agent callable
 * (module-level pFunc/pArgs) and read back the flight commands.
 * The set_*_arg() helpers fill successive slots (k) of the Python
 * argument tuple; get_*_result() read successive items of the result. */
void agent_act(unsigned char * img_bytes, int img_width, int img_height, bool_t img_is_belly, int pass_button,
	navdata_unpacked_t * navdata, commands_t * commands)
{    
	int k = 0;

	/* Raw RGB frame (3 bytes per pixel) wrapped as a Python bytearray. */
	PyObject *pImageBytes = PyByteArray_FromStringAndSize((const char *)img_bytes, img_width*img_height*3);
	
	set_arg(pImageBytes,                         k++);
	set_int_arg(img_width, 	                     k++);
	set_int_arg(img_height,	                     k++);
	
	set_int_arg(img_is_belly?1:0, 	             k++);
	set_int_arg(pass_button,				     k++);

    navdata_demo_t demo = navdata->navdata_demo;
	
	set_int_arg(demo.ctrl_state, 	         k++);
	set_int_arg(demo.vbat_flying_percentage, k++);
	set_float_arg(demo.theta,                k++);
	set_float_arg(demo.phi,                  k++);
	set_float_arg(demo.psi,                  k++);

	set_int_arg(navdata->navdata_altitude.altitude_raw, k++);

    navdata_vision_raw_t vision_raw = navdata->navdata_vision_raw;

	set_float_arg(vision_raw.vision_tx_raw, k++);
	set_float_arg(vision_raw.vision_ty_raw, k++);
	
	PyObject * pResult = PyObject_CallObject(pFunc, pArgs);
	
	if (!pResult)
{
		fun_error("Call failed\n", "");
	}
	
	/* NOTE(review): pResult is never Py_DECREF'd here, and pImageBytes'
	 * ownership depends on set_arg's refcount semantics — this looks like
	 * a per-call reference leak; confirm against set_arg's implementation. */
	k = 0;
	commands->zap     = get_int_result(pResult,   k++);
	commands->phi     = get_float_result(pResult, k++);
	commands->theta   = get_float_result(pResult, k++);
	commands->gaz     = get_float_result(pResult, k++);
	commands->yaw     = get_float_result(pResult, k++);
}
コード例 #19
0
ファイル: lua.vecmath.c プロジェクト: kidaa/Synthesis
/* Lua binding: mat4_set_translation(mat_table, vec_table).
 * Writes the vector into the matrix's translation component and stores
 * the result back into the first argument.  Returns 0 (no Lua values). */
static int mat4_set_translation(lua_State *L)
{
	t_jit_mat4 m;
	t_jit_vec3 v;

	if(! lua_istable(L, 1))
		luaL_error(L, "first argument must be a table");

	/* BUG FIX: the original re-checked stack index 1 here (copy-paste) and
	 * also read the vector from index 1, silently ignoring the second
	 * argument.  Both now use index 2, matching mat3_set_row's pattern. */
	if(! lua_istable(L, 2))
		luaL_error(L, "second argument must be a table");

	table_to_mat4(L, 1, &m);
	table_to_vec3(L, 2, &v);

	jit_mat4_set_translation(&m, &v);

	mat4_to_table(L, &m);
	set_arg(L, 1);
	lua_pop(L, 1);
	
	return 0;
}
コード例 #20
0
ファイル: eval.c プロジェクト: doly/femtoutil
/* Evaluate a user-defined function call (earlier/alternate snapshot of
 * eval.c: uses getfunc() and a two-argument set_arg()).
 * `next` holds the function name; its cdr holds the argument list.
 * Returns the evaluated result, or 0 when the name is not found. */
int lfunc(cons_t *next)
{
	cons_t *now;
	now = next;
	int fc;

	/* Look up the function slot index by name in g_fa. */
	fc = getfunc(next->cvalue);
	if (g_fa[fc].key == NULL) {
		printf("syntax error\n");
		return 0;
	} else {	
		next = next->cdr;
		/* Bind the call's argument values for the current layer. */
		set_arg(next, g_argl);

		/* Enter a new argument layer while the body evaluates. */
		g_argl++;

		now->result = eval(g_fa[fc].exp);

		g_argl--;	

		return now->result;
	}
}
コード例 #21
0
ファイル: pgp-pgsql.c プロジェクト: adunstan/pg-cvs-mirror
/* Parse a "key=val,key=val,..." option string into PGP context settings.
 * The input is first lower-cased into a fresh palloc'd copy, which is
 * tokenized in place (terminators written over '=' and ',').
 * Returns 0 on success (or empty input), PXE_ARGUMENT_ERROR on malformed
 * syntax, or the negative error from set_arg(). */
static int
parse_args(PGP_Context *ctx, uint8 *args, int arg_len,
		   struct debug_expect * ex)
{
	char	   *str = downcase_convert(args, arg_len);
	char	   *key,
			   *val;
	int			key_len,
				val_len;
	int			res = 0;
	char	   *p = str;

	while (*p)
	{
		/* Assume failure until this key=val pair parses cleanly. */
		res = PXE_ARGUMENT_ERROR;
		p = getword(p, &key, &key_len);
		if (*p++ != '=')
			break;
		p = getword(p, &val, &val_len);
		/* A pair ends either at end-of-string or at a comma separator. */
		if (*p == '\0')
			;
		else if (*p++ != ',')
			break;

		if (*key == 0 || *val == 0 || val_len == 0)
			break;

		/* NUL-terminate key and value in the working copy. */
		key[key_len] = 0;
		val[val_len] = 0;

		res = set_arg(ctx, key, val, ex);
		if (res < 0)
			break;
	}
	pfree(str);
	return res;
}
コード例 #22
0
static void set_float_arg(float val, int pos)
{
	
	set_arg(PyFloat_FromDouble((double)val), pos);
}
コード例 #23
0
ファイル: simple_training_net.c プロジェクト: zeno40/convnet
mkldnn_status_t simple_net() {
    mkldnn_engine_t engine;
    CHECK(mkldnn_engine_create(&engine, mkldnn_cpu, 0 /* idx */));

    /* build a simple net */
    uint32_t n_fwd = 0, n_bwd = 0;
    mkldnn_primitive_t net_fwd[10], net_bwd[10];
    args_t net_fwd_args[10], net_bwd_args[10];

    mkldnn_dim_t net_src_sizes[4] = { BATCH, IC, CONV_IH, CONV_IW };
    mkldnn_dim_t net_dst_sizes[4] = { BATCH, OC, POOL_OH, POOL_OW };

    float *net_src = (float *)malloc(product(net_src_sizes, 4) * sizeof(float));
    float *net_dst = (float *)malloc(product(net_dst_sizes, 4) * sizeof(float));

    init_net_data(net_src, 4, net_src_sizes);
    memset(net_dst, 0, product(net_dst_sizes, 4) * sizeof(float));

    /*----------------------------------------------------------------------*/
    /*----------------- Forward Stream -------------------------------------*/
    /* AlexNet: conv
     * {BATCH, IC, CONV_IH, CONV_IW} (x) {OC, IC, 11, 11} ->
     * {BATCH, OC, CONV_OH, CONV_OW}
     * strides: {CONV_STRIDE, CONV_STRIDE}
     */
    mkldnn_dim_t *conv_user_src_sizes = net_src_sizes;
    mkldnn_dim_t conv_user_weights_sizes[4] = { OC, IC, 11, 11 };
    mkldnn_dim_t conv_bias_sizes[4] = { OC };
    mkldnn_dim_t conv_user_dst_sizes[4] = { BATCH, OC, CONV_OH, CONV_OW };
    mkldnn_dim_t conv_strides[2] = { CONV_STRIDE, CONV_STRIDE };
    mkldnn_dim_t conv_padding[2] = { CONV_PAD, CONV_PAD };

    float *conv_src = net_src;
    float *conv_weights = (float *)malloc(
            product(conv_user_weights_sizes, 4) * sizeof(float));
    float *conv_bias
            = (float *)malloc(product(conv_bias_sizes, 1) * sizeof(float));

    init_net_data(conv_weights, 4, conv_user_weights_sizes);
    init_net_data(conv_bias, 1, conv_bias_sizes);

    /* create memory for user data */
    mkldnn_memory_t conv_user_src_memory, conv_user_weights_memory,
            conv_user_bias_memory;
    init_data_memory(4, conv_user_src_sizes, mkldnn_nchw, mkldnn_f32, engine,
            conv_src, &conv_user_src_memory);
    init_data_memory(4, conv_user_weights_sizes, mkldnn_oihw, mkldnn_f32,
            engine, conv_weights, &conv_user_weights_memory);
    init_data_memory(1, conv_bias_sizes, mkldnn_x, mkldnn_f32, engine,
            conv_bias, &conv_user_bias_memory);

    /* create a convolution */
    mkldnn_primitive_desc_t conv_pd;

    {
        /* create data descriptors for convolution w/ no specified format */
        mkldnn_memory_desc_t conv_src_md, conv_weights_md, conv_bias_md,
                conv_dst_md;
        CHECK(mkldnn_memory_desc_init_by_tag(&conv_src_md, 4,
                conv_user_src_sizes, mkldnn_f32, mkldnn_format_tag_any));
        CHECK(mkldnn_memory_desc_init_by_tag(&conv_weights_md, 4,
                conv_user_weights_sizes, mkldnn_f32, mkldnn_format_tag_any));
        CHECK(mkldnn_memory_desc_init_by_tag(
                &conv_bias_md, 1, conv_bias_sizes, mkldnn_f32, mkldnn_x));
        CHECK(mkldnn_memory_desc_init_by_tag(&conv_dst_md, 4,
                conv_user_dst_sizes, mkldnn_f32, mkldnn_format_tag_any));

        mkldnn_convolution_desc_t conv_any_desc;
        CHECK(mkldnn_convolution_forward_desc_init(&conv_any_desc,
                mkldnn_forward, mkldnn_convolution_direct, &conv_src_md,
                &conv_weights_md, &conv_bias_md, &conv_dst_md, conv_strides,
                conv_padding, conv_padding, mkldnn_padding_zero));

        CHECK(mkldnn_primitive_desc_create(
                &conv_pd, &conv_any_desc, NULL, engine, NULL));
    }

    mkldnn_memory_t conv_internal_src_memory, conv_internal_weights_memory,
            conv_internal_dst_memory;

    /* create memory for dst data, we don't need to reorder it to user data */
    const mkldnn_memory_desc_t *conv_dst_md
            = mkldnn_primitive_desc_query_md(conv_pd, mkldnn_query_dst_md, 0);
    CHECK(mkldnn_memory_create(&conv_internal_dst_memory, conv_dst_md, engine,
            MKLDNN_MEMORY_ALLOCATE));

    /* create reorder primitives between user data and convolution srcs
     * if required */
    mkldnn_primitive_t conv_reorder_src, conv_reorder_weights;

    const mkldnn_memory_desc_t *conv_src_md
            = mkldnn_primitive_desc_query_md(conv_pd, mkldnn_query_src_md, 0);
    CHECK(prepare_reorder(&conv_user_src_memory, conv_src_md, engine, 1,
            &conv_internal_src_memory, &conv_reorder_src, &n_fwd, net_fwd,
            net_fwd_args));

    const mkldnn_memory_desc_t *conv_weights_md
            = mkldnn_primitive_desc_query_md(
                    conv_pd, mkldnn_query_weights_md, 0);
    CHECK(prepare_reorder(&conv_user_weights_memory, conv_weights_md, engine, 1,
            &conv_internal_weights_memory, &conv_reorder_weights, &n_fwd,
            net_fwd, net_fwd_args));

    mkldnn_memory_t conv_src_memory = conv_internal_src_memory ?
            conv_internal_src_memory :
            conv_user_src_memory;
    mkldnn_memory_t conv_weights_memory = conv_internal_weights_memory ?
            conv_internal_weights_memory :
            conv_user_weights_memory;

    /* finally create a convolution primitive */
    mkldnn_primitive_t conv;
    CHECK(mkldnn_primitive_create(&conv, conv_pd));
    net_fwd[n_fwd] = conv;
    prepare_arg_node(&net_fwd_args[n_fwd], 4);
    set_arg(&net_fwd_args[n_fwd].args[0], MKLDNN_ARG_SRC, conv_src_memory);
    set_arg(&net_fwd_args[n_fwd].args[1], MKLDNN_ARG_WEIGHTS,
            conv_weights_memory);
    set_arg(&net_fwd_args[n_fwd].args[2], MKLDNN_ARG_BIAS,
            conv_user_bias_memory);
    set_arg(&net_fwd_args[n_fwd].args[3], MKLDNN_ARG_DST,
            conv_internal_dst_memory);
    n_fwd++;

    /* AlexNet: relu
     * {BATCH, OC, CONV_OH, CONV_OW} -> {BATCH, OC, CONV_OH, CONV_OW}
     */
    float negative_slope = 1.0f;

    /* keep memory format of source same as the format of convolution
     * output in order to avoid reorder */
    const mkldnn_memory_desc_t *relu_src_md = conv_dst_md;

    /* create a relu primitive descriptor */
    mkldnn_eltwise_desc_t relu_desc;
    CHECK(mkldnn_eltwise_forward_desc_init(&relu_desc, mkldnn_forward,
            mkldnn_eltwise_relu, relu_src_md, negative_slope, 0));

    mkldnn_primitive_desc_t relu_pd;
    CHECK(mkldnn_primitive_desc_create(
            &relu_pd, &relu_desc, NULL, engine, NULL));

    /* create relu dst memory */
    mkldnn_memory_t relu_dst_memory;
    const mkldnn_memory_desc_t *relu_dst_md
            = mkldnn_primitive_desc_query_md(relu_pd, mkldnn_query_dst_md, 0);
    CHECK(mkldnn_memory_create(&relu_dst_memory, relu_dst_md, engine,
            MKLDNN_MEMORY_ALLOCATE));

    /* finally create a relu primitive */
    mkldnn_primitive_t relu;
    CHECK(mkldnn_primitive_create(&relu, relu_pd));
    net_fwd[n_fwd] = relu;
    prepare_arg_node(&net_fwd_args[n_fwd], 2);
    set_arg(&net_fwd_args[n_fwd].args[0], MKLDNN_ARG_SRC,
            conv_internal_dst_memory);
    set_arg(&net_fwd_args[n_fwd].args[1], MKLDNN_ARG_DST, relu_dst_memory);
    n_fwd++;

    /* AlexNet: lrn
     * {BATCH, OC, CONV_OH, CONV_OW} -> {BATCH, OC, CONV_OH, CONV_OW}
     * local size: 5
     * alpha: 0.0001
     * beta: 0.75
     * k: 1.0
     */
    uint32_t local_size = 5;
    float alpha = 0.0001f;
    float beta = 0.75f;
    float k = 1.0f;

    /* create lrn src memory descriptor using dst memory descriptor
     *  from previous primitive */
    const mkldnn_memory_desc_t *lrn_src_md = relu_dst_md;

    /* create a lrn primitive descriptor */
    mkldnn_lrn_desc_t lrn_desc;
    CHECK(mkldnn_lrn_forward_desc_init(&lrn_desc, mkldnn_forward,
            mkldnn_lrn_across_channels, lrn_src_md, local_size, alpha, beta,
            k));

    mkldnn_primitive_desc_t lrn_pd;
    CHECK(mkldnn_primitive_desc_create(&lrn_pd, &lrn_desc, NULL, engine, NULL));

    /* create primitives for lrn dst and workspace memory */
    mkldnn_memory_t lrn_dst_memory, lrn_ws_memory;

    const mkldnn_memory_desc_t *lrn_dst_md
            = mkldnn_primitive_desc_query_md(lrn_pd, mkldnn_query_dst_md, 0);
    CHECK(mkldnn_memory_create(&lrn_dst_memory, lrn_dst_md, engine,
            MKLDNN_MEMORY_ALLOCATE));

    /* create workspace only in training and only for forward primitive*/
    /* query lrn_pd for workspace, this memory will be shared with forward lrn*/
    const mkldnn_memory_desc_t *lrn_ws_md = mkldnn_primitive_desc_query_md(
            lrn_pd, mkldnn_query_workspace_md, 0);
    CHECK(mkldnn_memory_create(
            &lrn_ws_memory, lrn_ws_md, engine, MKLDNN_MEMORY_ALLOCATE));

    /* finally create a lrn primitive */
    mkldnn_primitive_t lrn;
    CHECK(mkldnn_primitive_create(&lrn, lrn_pd));
    net_fwd[n_fwd] = lrn;
    prepare_arg_node(&net_fwd_args[n_fwd], 3);
    set_arg(&net_fwd_args[n_fwd].args[0], MKLDNN_ARG_SRC, relu_dst_memory);
    set_arg(&net_fwd_args[n_fwd].args[1], MKLDNN_ARG_DST, lrn_dst_memory);
    set_arg(&net_fwd_args[n_fwd].args[2], MKLDNN_ARG_WORKSPACE, lrn_ws_memory);
    n_fwd++;

    /* AlexNet: pool
     * {BATCH, OC, CONV_OH, CONV_OW} -> {BATCH, OC, POOL_OH, POOL_OW}
     * kernel: {3, 3}
     * strides: {POOL_STRIDE, POOL_STRIDE}
     */
    mkldnn_dim_t *pool_dst_sizes = net_dst_sizes;
    mkldnn_dim_t pool_kernel[2] = { 3, 3 };
    mkldnn_dim_t pool_strides[2] = { POOL_STRIDE, POOL_STRIDE };
    mkldnn_dim_t pool_padding[2] = { POOL_PAD, POOL_PAD };

    /* create memory for user dst data */
    mkldnn_memory_t pool_user_dst_memory;
    init_data_memory(4, pool_dst_sizes, mkldnn_nchw, mkldnn_f32, engine,
            net_dst, &pool_user_dst_memory);

    /* create a pooling primitive descriptor */
    mkldnn_primitive_desc_t pool_pd;

    {
        /* create pooling src memory descriptor using dst descriptor
         *  from previous primitive */
        const mkldnn_memory_desc_t *pool_src_md = lrn_dst_md;

        /* create descriptors for dst pooling data */
        mkldnn_memory_desc_t pool_dst_md;
        CHECK(mkldnn_memory_desc_init_by_tag(&pool_dst_md, 4, pool_dst_sizes,
                mkldnn_f32, mkldnn_format_tag_any));

        mkldnn_pooling_desc_t pool_desc;
        CHECK(mkldnn_pooling_forward_desc_init(&pool_desc, mkldnn_forward,
                mkldnn_pooling_max, pool_src_md, &pool_dst_md, pool_strides,
                pool_kernel, pool_padding, pool_padding, mkldnn_padding_zero));

        CHECK(mkldnn_primitive_desc_create(
                &pool_pd, &pool_desc, NULL, engine, NULL));
    }

    /* create memory for workspace */
    mkldnn_memory_t pool_ws_memory;
    const mkldnn_memory_desc_t *pool_ws_md = mkldnn_primitive_desc_query_md(
            pool_pd, mkldnn_query_workspace_md, 0);
    CHECK(mkldnn_memory_create(&pool_ws_memory, pool_ws_md, engine,
            MKLDNN_MEMORY_ALLOCATE));

    /* create reorder primitives between pooling dsts and user format dst
     * if required */
    mkldnn_primitive_t pool_reorder_dst;
    mkldnn_memory_t pool_internal_dst_memory;
    const mkldnn_memory_desc_t *pool_dst_md
            = mkldnn_primitive_desc_query_md(pool_pd, mkldnn_query_dst_md, 0);
    n_fwd += 1; /* tentative workaround: preserve space for pooling that should
                                         happen before the reorder */
    CHECK(prepare_reorder(&pool_user_dst_memory, pool_dst_md, engine, 0,
            &pool_internal_dst_memory, &pool_reorder_dst, &n_fwd, net_fwd,
            net_fwd_args));
    n_fwd -= pool_reorder_dst ? 2 : 1;

    mkldnn_memory_t pool_dst_memory = pool_internal_dst_memory ?
            pool_internal_dst_memory :
            pool_user_dst_memory;

    /* finally create a pooling primitive */
    mkldnn_primitive_t pool;
    CHECK(mkldnn_primitive_create(&pool, pool_pd));
    net_fwd[n_fwd] = pool;
    prepare_arg_node(&net_fwd_args[n_fwd], 3);
    set_arg(&net_fwd_args[n_fwd].args[0], MKLDNN_ARG_SRC, lrn_dst_memory);
    set_arg(&net_fwd_args[n_fwd].args[1], MKLDNN_ARG_DST, pool_dst_memory);
    set_arg(&net_fwd_args[n_fwd].args[2], MKLDNN_ARG_WORKSPACE, pool_ws_memory);
    n_fwd++;

    if (pool_reorder_dst)
        n_fwd += 1;

    /*-----------------------------------------------------------------------*/
    /*----------------- Backward Stream -------------------------------------*/
    /*-----------------------------------------------------------------------*/

    /* ... user diff_data ...*/
    float *net_diff_dst
            = (float *)malloc(product(pool_dst_sizes, 4) * sizeof(float));

    init_net_data(net_diff_dst, 4, pool_dst_sizes);

    /* create memory for user diff dst data*/
    mkldnn_memory_t pool_user_diff_dst_memory;
    init_data_memory(4, pool_dst_sizes, mkldnn_nchw, mkldnn_f32, engine,
            net_diff_dst, &pool_user_diff_dst_memory);

    /* Pooling Backward */
    /* pooling diff src memory descriptor */
    const mkldnn_memory_desc_t *pool_diff_src_md = lrn_dst_md;

    /* pooling diff dst memory descriptor */
    const mkldnn_memory_desc_t *pool_diff_dst_md = pool_dst_md;

    /* create backward pooling descriptor */
    mkldnn_pooling_desc_t pool_bwd_desc;
    CHECK(mkldnn_pooling_backward_desc_init(&pool_bwd_desc, mkldnn_pooling_max,
            pool_diff_src_md, pool_diff_dst_md, pool_strides, pool_kernel,
            pool_padding, pool_padding, mkldnn_padding_zero));

    /* backward primitive descriptor needs to hint forward descriptor*/
    mkldnn_primitive_desc_t pool_bwd_pd;
    CHECK(mkldnn_primitive_desc_create(
            &pool_bwd_pd, &pool_bwd_desc, NULL, engine, pool_pd));

    /* create reorder primitive between user diff dst and pool diff dst
     * if required*/
    mkldnn_memory_t pool_diff_dst_memory, pool_internal_diff_dst_memory;
    mkldnn_primitive_t pool_reorder_diff_dst;
    CHECK(prepare_reorder(&pool_user_diff_dst_memory, pool_diff_dst_md, engine,
            1, &pool_internal_diff_dst_memory, &pool_reorder_diff_dst, &n_bwd,
            net_bwd, net_bwd_args));

    pool_diff_dst_memory = pool_internal_diff_dst_memory ?
            pool_internal_diff_dst_memory :
            pool_user_diff_dst_memory;

    /* create memory for pool diff src data */
    mkldnn_memory_t pool_diff_src_memory;
    CHECK(mkldnn_memory_create(&pool_diff_src_memory, pool_diff_src_md, engine,
            MKLDNN_MEMORY_ALLOCATE));

    /* finally create backward pooling primitive */
    mkldnn_primitive_t pool_bwd;
    CHECK(mkldnn_primitive_create(&pool_bwd, pool_bwd_pd));
    net_bwd[n_bwd] = pool_bwd;
    prepare_arg_node(&net_bwd_args[n_bwd], 3);
    set_arg(&net_bwd_args[n_bwd].args[0], MKLDNN_ARG_DIFF_DST,
            pool_diff_dst_memory);
    set_arg(&net_bwd_args[n_bwd].args[1], MKLDNN_ARG_WORKSPACE, pool_ws_memory);
    set_arg(&net_bwd_args[n_bwd].args[2], MKLDNN_ARG_DIFF_SRC,
            pool_diff_src_memory);
    n_bwd++;

    /* Backward lrn */
    const mkldnn_memory_desc_t *lrn_diff_dst_md = pool_diff_src_md;

    /* create backward lrn descriptor */
    mkldnn_lrn_desc_t lrn_bwd_desc;
    CHECK(mkldnn_lrn_backward_desc_init(&lrn_bwd_desc,
            mkldnn_lrn_across_channels, lrn_src_md, lrn_diff_dst_md, local_size,
            alpha, beta, k));

    mkldnn_primitive_desc_t lrn_bwd_pd;
    CHECK(mkldnn_primitive_desc_create(
            &lrn_bwd_pd, &lrn_bwd_desc, NULL, engine, lrn_pd));

    /* create memory for lrn diff src */
    mkldnn_memory_t lrn_diff_src_memory;
    const mkldnn_memory_desc_t *lrn_diff_src_md
            = mkldnn_primitive_desc_query_md(
                    lrn_bwd_pd, mkldnn_query_diff_src_md, 0);
    CHECK(mkldnn_memory_create(&lrn_diff_src_memory, lrn_diff_src_md, engine,
            MKLDNN_MEMORY_ALLOCATE));

    /* finally create backward lrn primitive */
    mkldnn_primitive_t lrn_bwd;
    CHECK(mkldnn_primitive_create(&lrn_bwd, lrn_bwd_pd));
    net_bwd[n_bwd] = lrn_bwd;
    prepare_arg_node(&net_bwd_args[n_bwd], 4);
    set_arg(&net_bwd_args[n_bwd].args[0], MKLDNN_ARG_SRC, relu_dst_memory);
    set_arg(&net_bwd_args[n_bwd].args[1], MKLDNN_ARG_DIFF_DST,
            pool_diff_src_memory);
    set_arg(&net_bwd_args[n_bwd].args[2], MKLDNN_ARG_WORKSPACE, lrn_ws_memory);
    set_arg(&net_bwd_args[n_bwd].args[3], MKLDNN_ARG_DIFF_SRC,
            lrn_diff_src_memory);
    n_bwd++;

    /* Backward relu */
    const mkldnn_memory_desc_t *relu_diff_dst_md = lrn_diff_src_md;

    /* create backward relu descriptor */
    mkldnn_eltwise_desc_t relu_bwd_desc;
    CHECK(mkldnn_eltwise_backward_desc_init(&relu_bwd_desc, mkldnn_eltwise_relu,
            relu_diff_dst_md, relu_src_md, negative_slope, 0));

    mkldnn_primitive_desc_t relu_bwd_pd;
    CHECK(mkldnn_primitive_desc_create(
            &relu_bwd_pd, &relu_bwd_desc, NULL, engine, relu_pd));

    /* create memory for relu diff src */
    mkldnn_memory_t relu_diff_src_memory;
    const mkldnn_memory_desc_t *relu_diff_src_md
            = mkldnn_primitive_desc_query_md(
                    relu_bwd_pd, mkldnn_query_diff_src_md, 0);
    CHECK(mkldnn_memory_create(&relu_diff_src_memory, relu_diff_src_md, engine,
            MKLDNN_MEMORY_ALLOCATE));

    /* finally create backward relu primitive */
    mkldnn_primitive_t relu_bwd;
    CHECK(mkldnn_primitive_create(&relu_bwd, relu_bwd_pd));
    net_bwd[n_bwd] = relu_bwd;
    prepare_arg_node(&net_bwd_args[n_bwd], 3);
    set_arg(&net_bwd_args[n_bwd].args[0], MKLDNN_ARG_SRC,
            conv_internal_dst_memory);
    set_arg(&net_bwd_args[n_bwd].args[1], MKLDNN_ARG_DIFF_DST,
            lrn_diff_src_memory);
    set_arg(&net_bwd_args[n_bwd].args[2], MKLDNN_ARG_DIFF_SRC,
            relu_diff_src_memory);
    n_bwd++;

    /* Backward convolution with respect to weights */
    float *conv_diff_bias_buffer
            = (float *)malloc(product(conv_bias_sizes, 1) * sizeof(float));
    float *conv_user_diff_weights_buffer = (float *)malloc(
            product(conv_user_weights_sizes, 4) * sizeof(float));

    /* initialize memory for diff weights in user format */
    mkldnn_memory_t conv_user_diff_weights_memory;
    init_data_memory(4, conv_user_weights_sizes, mkldnn_oihw, mkldnn_f32,
            engine, conv_user_diff_weights_buffer,
            &conv_user_diff_weights_memory);

    /* create backward convolution primitive descriptor */
    /* Primitive descriptor for the backward-by-weights convolution; created
     * in the scope below and consumed (queried / instantiated) afterwards. */
    mkldnn_primitive_desc_t conv_bwd_weights_pd;

    {
        /* memory descriptors should be in format `any` to allow backward
         * convolution for
         * weights to chose the format it prefers for best performance */
        mkldnn_memory_desc_t conv_diff_src_md, conv_diff_weights_md,
                conv_diff_bias_md, conv_diff_dst_md;
        CHECK(mkldnn_memory_desc_init_by_tag(&conv_diff_src_md, 4,
                conv_user_src_sizes, mkldnn_f32, mkldnn_format_tag_any));
        CHECK(mkldnn_memory_desc_init_by_tag(&conv_diff_weights_md, 4,
                conv_user_weights_sizes, mkldnn_f32, mkldnn_format_tag_any));
        CHECK(mkldnn_memory_desc_init_by_tag(
                &conv_diff_bias_md, 1, conv_bias_sizes, mkldnn_f32, mkldnn_x));
        CHECK(mkldnn_memory_desc_init_by_tag(&conv_diff_dst_md, 4,
                conv_user_dst_sizes, mkldnn_f32, mkldnn_format_tag_any));

        /* create backward convolution descriptor */
        mkldnn_convolution_desc_t conv_bwd_weights_desc;
        CHECK(mkldnn_convolution_backward_weights_desc_init(
                &conv_bwd_weights_desc, mkldnn_convolution_direct,
                &conv_diff_src_md, &conv_diff_weights_md, &conv_diff_bias_md,
                &conv_diff_dst_md, conv_strides, conv_padding, conv_padding,
                mkldnn_padding_zero));

        /* conv_pd (the forward primitive descriptor) is passed as a hint so
         * the backward pass can pick an implementation that matches it */
        CHECK(mkldnn_primitive_desc_create(&conv_bwd_weights_pd,
                &conv_bwd_weights_desc, NULL, engine, conv_pd));
    }

    /* for best performance convolution backward might chose
     * different memory format for src and diff_dst
     * than the memory formats preferred by forward convolution
     * for src and dst respectively */
    /* create reorder primitives for src from forward convolution to the
     * format chosen by backward convolution */
    mkldnn_primitive_t conv_bwd_reorder_src;
    mkldnn_memory_t conv_bwd_internal_src_memory;
    const mkldnn_memory_desc_t *conv_diff_src_md
            = mkldnn_primitive_desc_query_md(
                    conv_bwd_weights_pd, mkldnn_query_src_md, 0);
    CHECK(prepare_reorder(&conv_src_memory, conv_diff_src_md, engine, 1,
            &conv_bwd_internal_src_memory, &conv_bwd_reorder_src, &n_bwd,
            net_bwd, net_bwd_args));

    /* prepare_reorder() leaves the internal memory NULL when no reorder was
     * needed, so fall back to the user-format memory in that case */
    mkldnn_memory_t conv_bwd_weights_src_memory = conv_bwd_internal_src_memory ?
            conv_bwd_internal_src_memory :
            conv_src_memory;

    /* create reorder primitives for diff_dst between diff_src from relu_bwd
     * and format preferred by conv_diff_weights */
    mkldnn_primitive_t conv_reorder_diff_dst;
    mkldnn_memory_t conv_internal_diff_dst_memory;
    const mkldnn_memory_desc_t *conv_diff_dst_md
            = mkldnn_primitive_desc_query_md(
                    conv_bwd_weights_pd, mkldnn_query_diff_dst_md, 0);

    CHECK(prepare_reorder(&relu_diff_src_memory, conv_diff_dst_md, engine, 1,
            &conv_internal_diff_dst_memory, &conv_reorder_diff_dst, &n_bwd,
            net_bwd, net_bwd_args));

    mkldnn_memory_t conv_diff_dst_memory = conv_internal_diff_dst_memory ?
            conv_internal_diff_dst_memory :
            relu_diff_src_memory;

    /* create reorder primitives for conv diff weights memory */
    mkldnn_primitive_t conv_reorder_diff_weights;
    mkldnn_memory_t conv_internal_diff_weights_memory;
    const mkldnn_memory_desc_t *conv_diff_weights_md
            = mkldnn_primitive_desc_query_md(
                    conv_bwd_weights_pd, mkldnn_query_diff_weights_md, 0);
    n_bwd += 1; /* tentative workaround: preserve space for conv_bwd_weights
                                         that should happen before the reorder
                 */
    /* prepare_reorder() appends the (optional) reorder at slot n_bwd and
     * advances n_bwd for each primitive it added; the adjustment below
     * rewinds n_bwd so that the conv_bwd_weights primitive created further
     * down lands in the slot reserved *before* the weights reorder. */
    CHECK(prepare_reorder(&conv_user_diff_weights_memory, conv_diff_weights_md,
            engine, 0, &conv_internal_diff_weights_memory,
            &conv_reorder_diff_weights, &n_bwd, net_bwd, net_bwd_args));
    n_bwd -= conv_reorder_diff_weights ? 2 : 1;

    mkldnn_memory_t conv_diff_weights_memory
            = conv_internal_diff_weights_memory ?
            conv_internal_diff_weights_memory :
            conv_user_diff_weights_memory;

    /* create memory for diff bias memory */
    mkldnn_memory_t conv_diff_bias_memory;
    const mkldnn_memory_desc_t *conv_diff_bias_md
            = mkldnn_primitive_desc_query_md(
                    conv_bwd_weights_pd, mkldnn_query_diff_weights_md, 1);
    CHECK(mkldnn_memory_create(
            &conv_diff_bias_memory, conv_diff_bias_md, engine, NULL));
    CHECK(mkldnn_memory_set_data_handle(
            conv_diff_bias_memory, conv_diff_bias_buffer));

    /* finally created backward convolution weights primitive */
    mkldnn_primitive_t conv_bwd_weights;
    CHECK(mkldnn_primitive_create(&conv_bwd_weights, conv_bwd_weights_pd));
    net_bwd[n_bwd] = conv_bwd_weights;
    prepare_arg_node(&net_bwd_args[n_bwd], 4);
    set_arg(&net_bwd_args[n_bwd].args[0], MKLDNN_ARG_SRC,
            conv_bwd_weights_src_memory);
    set_arg(&net_bwd_args[n_bwd].args[1], MKLDNN_ARG_DIFF_DST,
            conv_diff_dst_memory);
    set_arg(&net_bwd_args[n_bwd].args[2], MKLDNN_ARG_DIFF_WEIGHTS,
            conv_diff_weights_memory);
    set_arg(&net_bwd_args[n_bwd].args[3], MKLDNN_ARG_DIFF_BIAS,
            conv_diff_bias_memory);
    n_bwd++;

    /* skip past the weights reorder slot that was reserved earlier */
    if (conv_reorder_diff_weights)
        n_bwd += 1;

    // output from backward stream
    void *net_diff_weights = NULL;
    void *net_diff_bias = NULL;

    int n_iter = 10; // number of iterations for training.
    mkldnn_stream_t stream;
    CHECK(mkldnn_stream_create(&stream, engine, mkldnn_stream_default_flags));
    /* Execute the net */
    for (int i = 0; i < n_iter; i++) {
        /* NOTE(review): the inner loops reuse the name `i`, shadowing the
         * iteration counter; harmless as written, but worth renaming. */
        for (uint32_t i = 0; i < n_fwd; ++i)
            CHECK(mkldnn_primitive_execute(net_fwd[i], stream,
                    net_fwd_args[i].nargs, net_fwd_args[i].args));

        /* Update net_diff_dst */
        void *net_output = NULL; // output from forward stream:
        CHECK(mkldnn_memory_get_data_handle(pool_user_dst_memory, &net_output));
        /*...user updates net_diff_dst using net_output...*/
        // some user defined func update_diff_dst(net_diff_dst, net_output)

        /* Backward pass */
        for (uint32_t i = 0; i < n_bwd; ++i)
            CHECK(mkldnn_primitive_execute(net_bwd[i], stream,
                    net_bwd_args[i].nargs, net_bwd_args[i].args));

        /*... update weights ... */
        CHECK(mkldnn_memory_get_data_handle(
                conv_user_diff_weights_memory, &net_diff_weights));
        CHECK(mkldnn_memory_get_data_handle(
                conv_diff_bias_memory, &net_diff_bias));
        /* ...user updates weights and bias using diff weights and bias...*/
        // some user defined func update_weights(conv_user_weights_memory,
        // conv_bias_memory,
        //      net_diff_weights, net_diff_bias);
    }
    CHECK(mkldnn_stream_wait(stream));

    /* NOTE(review): the destroy calls below are not wrapped in CHECK(),
     * unlike the creation calls above -- presumably intentional during
     * teardown, but confirm. */
    mkldnn_stream_destroy(stream);

    /* clean up nets */
    for (uint32_t i = 0; i < n_fwd; ++i)
        free_arg_node(&net_fwd_args[i]);
    for (uint32_t i = 0; i < n_bwd; ++i)
        free_arg_node(&net_bwd_args[i]);

    /* Cleanup forward */
    CHECK(mkldnn_primitive_desc_destroy(pool_pd));
    CHECK(mkldnn_primitive_desc_destroy(lrn_pd));
    CHECK(mkldnn_primitive_desc_destroy(relu_pd));
    CHECK(mkldnn_primitive_desc_destroy(conv_pd));

    free(net_src);
    free(net_dst);

    mkldnn_memory_destroy(conv_user_src_memory);
    mkldnn_memory_destroy(conv_user_weights_memory);
    mkldnn_memory_destroy(conv_user_bias_memory);
    mkldnn_memory_destroy(conv_internal_src_memory);
    mkldnn_memory_destroy(conv_internal_weights_memory);
    mkldnn_memory_destroy(conv_internal_dst_memory);
    mkldnn_primitive_destroy(conv_reorder_src);
    mkldnn_primitive_destroy(conv_reorder_weights);
    mkldnn_primitive_destroy(conv);

    free(conv_weights);
    free(conv_bias);

    mkldnn_memory_destroy(relu_dst_memory);
    mkldnn_primitive_destroy(relu);

    mkldnn_memory_destroy(lrn_ws_memory);
    mkldnn_memory_destroy(lrn_dst_memory);
    mkldnn_primitive_destroy(lrn);

    mkldnn_memory_destroy(pool_user_dst_memory);
    mkldnn_memory_destroy(pool_internal_dst_memory);
    mkldnn_memory_destroy(pool_ws_memory);
    mkldnn_primitive_destroy(pool_reorder_dst);
    mkldnn_primitive_destroy(pool);

    /* Cleanup backward */
    CHECK(mkldnn_primitive_desc_destroy(pool_bwd_pd));
    CHECK(mkldnn_primitive_desc_destroy(lrn_bwd_pd));
    CHECK(mkldnn_primitive_desc_destroy(relu_bwd_pd));
    CHECK(mkldnn_primitive_desc_destroy(conv_bwd_weights_pd));

    mkldnn_memory_destroy(pool_user_diff_dst_memory);
    mkldnn_memory_destroy(pool_diff_src_memory);
    mkldnn_memory_destroy(pool_internal_diff_dst_memory);
    mkldnn_primitive_destroy(pool_reorder_diff_dst);
    mkldnn_primitive_destroy(pool_bwd);

    free(net_diff_dst);

    mkldnn_memory_destroy(lrn_diff_src_memory);
    mkldnn_primitive_destroy(lrn_bwd);

    mkldnn_memory_destroy(relu_diff_src_memory);
    mkldnn_primitive_destroy(relu_bwd);

    mkldnn_memory_destroy(conv_user_diff_weights_memory);
    mkldnn_memory_destroy(conv_diff_bias_memory);
    mkldnn_memory_destroy(conv_bwd_internal_src_memory);
    mkldnn_primitive_destroy(conv_bwd_reorder_src);
    mkldnn_memory_destroy(conv_internal_diff_dst_memory);
    mkldnn_primitive_destroy(conv_reorder_diff_dst);
    mkldnn_memory_destroy(conv_internal_diff_weights_memory);
    mkldnn_primitive_destroy(conv_reorder_diff_weights);
    mkldnn_primitive_destroy(conv_bwd_weights);

    free(conv_diff_bias_buffer);
    free(conv_user_diff_weights_buffer);

    mkldnn_engine_destroy(engine);

    return mkldnn_success;
}
コード例 #24
0
ファイル: functions.cpp プロジェクト: MechCoder/csympy
// Constructor of the secant function node: stores `arg` after checking
// that it is in canonical form.
// NOTE(review): CSYMPY_ASSERT is written without a trailing ';' -- assumed
// the macro expands to a complete statement; confirm against its definition.
Sec::Sec(const RCP<const Basic> &arg)
{
    CSYMPY_ASSERT(is_canonical(arg))
    set_arg(arg);
}
コード例 #25
0
ファイル: kernel.hpp プロジェクト: sajis997/compute
 /// Generic typed setter: forwards the value's size and address to the
 /// low-level set_arg(index, size, pointer) overload.
 /// NOTE(review): the raw bytes of `value` are handed to the runtime, so T
 /// presumably must be a trivially-copyable, OpenCL-compatible type -- confirm.
 void set_arg(size_t index, const T &value)
 {
     set_arg(index, sizeof(value), &value);
 }
コード例 #26
0
ファイル: iniconfig.c プロジェクト: kerheol/dingux-hugo
void
parse_commandline (int argc, char **argv)
{
  /* Parse the emulator command line.
   *
   * "-X" style options take their value either glued on ("-Xvalue") or as
   * the following argv entry ("-X value"); values are handed to set_arg().
   * The first bare argument is the cartridge image name (backslashes are
   * normalized to forward slashes); the second is the backup-memory file
   * name.  Finally video_driver is selected from use_eagle / use_scanline.
   *
   * Fix: the loop index was declared `char`, which wraps (and, since plain
   * char may be signed, is undefined) once argc exceeds 127; it is now an
   * int.  arg_error is widened to int for the same reason. */
  char next_arg;
  int i, arg_error = 0;

  Log ("--[ PARSING COMMAND LINE ]--------------------------\n");

#if 0 // !defined(WIN32) && !defined(SOLARIS) && !defined(NGC) && !defined(PSP)
  argp_parse (&argp, argc, argv, 0, 0, &option);
#else
  next_arg = 0;
  for (i = 1; i < argc; i++)
    {
      if (!next_arg)
        {
          if (argv[i][0] == '-')
            {
              if (strlen (argv[i]) == 2)
                {
                  /* Lone "-X": remember the option letter and consume the
                   * next argv entry as its value. */
                  switch (argv[i][1])
                    {
                    default:
                      next_arg = argv[i][1];
                      break;
                    }
                }
              else
                {
                  /* "-Xvalue": option letter with the value glued on. */
                  arg_error |= set_arg (argv[i][1], (char *) &argv[i][2]);
                }
            }
          else
            {
              if (!cart_name[0])
                {
                  /* NOTE(review): unbounded strcpy -- cart_name's size is
                   * not visible here; confirm it cannot overflow. */
                  strcpy (cart_name, argv[i]);
                  Log ("Setting card name to %s\n", cart_name);
                  {
                    /* Normalize Windows path separators; strlen hoisted
                     * out of the loop condition. */
                    size_t x, len = strlen (cart_name);
                    for (x = 0; x < len; x++)
                      if (cart_name[x] == '\\')
                        cart_name[x] = '/';
                  }
                }
              else if (!bmdefault)
                {
                  Log ("Setting backup mem file name to %s\n", argv[i]);
                  bmdefault = argv[i];
                }
              else
                {
                  Log ("Unrecognized option : %s\n", argv[i]);
                  arg_error = 1;
                }
            }
        }
      else
        {
          /* A previous lone "-X" is waiting for this value. */
          arg_error |= set_arg (next_arg, argv[i]);
          next_arg = 0;
        }
    }

  if (next_arg)
    {
      Log ("No value for last arg : %c\n", next_arg);
      next_arg = 0;
      arg_error = 1;
    }

  /* NOTE(review): arg_error is accumulated but never acted upon --
   * parse failures are only logged.  Confirm that this is intended. */
#endif

  Log ("End of parsing command line\n");

  /* Select the video driver from the scaler flags (eagle wins). */
  video_driver = 0;

  if (use_eagle)
    video_driver = 1;
  else if (use_scanline)
    video_driver = 2;
}
コード例 #27
0
ファイル: kernel.hpp プロジェクト: sajis997/compute
    /// \internal_
    ///
    /// Overload for raw memory objects (buffer, image2d, image3d):
    /// forwards the underlying cl_mem handle to the generic setter.
    void set_arg(size_t index, const memory_object &mem)
    {
        // The memory object must belong to the same context as this kernel.
        BOOST_ASSERT(mem.get_context() == this->get_context());

        const cl_mem &handle = mem.get();
        set_arg(index, sizeof(cl_mem), static_cast<const void *>(&handle));
    }
コード例 #28
0
ファイル: main.cpp プロジェクト: avr-aics-riken/PFClib
int main( int argc, char **argv )
{

  //MPI Initialize
  if( MPI_Init(&argc,&argv) != MPI_SUCCESS )
  {
      std::cerr << "MPI_Init error." << std::endl;
      return false;
  }

  Staging STG;

  //引数の取り出し&セット
  set_arg(STG,argc,argv);

  CIO::E_CIO_ERRORCODE ret;
  vector<int> readRankList; ///< 読込みランクリスト

  //読込みDFIファイルのループ
  for( int i=0; i<STG.m_dfi_fname.size(); i++ ) {

    printf("---- i=%d  m_dfi_fname=%s\n",i,STG.m_dfi_fname[i].c_str() );
    fflush(stdout);

    //初期化、ファイルの読込み、DFIのインスタンス  
    if( !STG.Initial(STG.m_infofile, STG.m_dfi_fname[i]) ) {
      printf("ERROR Initial()\n");
      return 0;
    }

    //DFIのdirectory path get
    STG.m_inPath = CIO::cioPath_DirName(STG.m_dfi_fname[i]);
    //printf("   STG.m_inPath = %s\n",STG.m_inPath.c_str() );

    bool isSameDiv = true;        ///< 分割数フラグ true:1x1 false:MxN
    bool isSame = true;           ///< 粗密フラグ true:密 false:粗
    CIO::E_CIO_READTYPE readflag; ///< 読込み判定フラグ

    // 分割数フラグの設定
    for(int j=0; j<3; j++ ) {
      if( STG.m_Gdiv[j] != STG.dfi_Domain->GlobalDivision[j] ) 
      {
        isSameDiv = false;
      }
    }

    // 粗密フラグの設定
    if( STG.CheckGlobalVoxel(STG.m_GVoxel,(int *)STG.dfi_Domain->GlobalVoxel) == STG_E_GV_SAME ) 
    {
      isSame = true;
    } 
    else if( STG.CheckGlobalVoxel(STG.m_GVoxel,(int *)STG.dfi_Domain->GlobalVoxel) == STG_E_GVX2_SAME )
    {
      isSame = false;
    } else {
     printf("ERROR Dimension size : %d %d %d\n",STG.m_GVoxel[0],STG.m_GVoxel[1],STG.m_GVoxel[2]);
     return 0;
    }

    //読込み判定フラグの設定
    if( isSameDiv == true )
    {
      if( isSame == true ) 
      {
        readflag = CIO::E_CIO_SAMEDIV_SAMERES;
      } 
      else 
      {
        readflag = CIO::E_CIO_SAMEDIV_REFINEMENT;
      }
    } 
    else 
    {
      if( isSame == true )
      {
        readflag = CIO::E_CIO_DIFFDIV_SAMERES;
      }
      else
      {
        readflag = CIO::E_CIO_DIFFDIV_REFINEMENT;
      }
    }

    int numRank = STG.m_GRankInfo.size();

    //ランクマップの生成
    int* rankMap = STG.CreateRankMap();

    //STG.m_GRankInfoの生成
    STG.m_HeadTail=NULL;
    if( numRank>0 ) {
      if( numRank != STG.m_NumberOfRank ) {
        printf("ERROR MISMATCH NumberOfRank\n");
        return 0;
      }
      STG.m_HeadTail = new int[numRank][6];
      //ランク毎のXYZ方向のheadとtaileテーブルの生成
      if( !STG.CreateHeadTail(rankMap,STG.m_GRankInfo) ) {
        printf("ERROR CreateHeadTail()\n");
        return 0;
      }
    } else {
      size_t ndiv = STG.m_Gdiv[0]*STG.m_Gdiv[1]*STG.m_Gdiv[2];
      STG.m_HeadTail = new int[ndiv][6];
      //ランク毎のXYZ方向のheadとtaileテーブルの生成
      if( !STG.CreateHeadTail(rankMap) ) {
        printf("ERROR CreateHeadTail()\n");
        return 0;
      }
    }

    //並列数のセット
    numRank=STG.m_GRankInfo.size();

    //並列数のループ
    char tmp[20];
    if( STG.m_outPath == "" ) STG.m_outPath=".";
    int len = STG.m_outPath.size()+7;

    //printf("---- i=%d  numRank=%d\n",i,numRank );
    //fflush(stdout);

    for(int j=0; j<numRank; j++) {
       //printf("  ---- i=%d  j=%d\n",i,j );
       //fflush(stdout);

       //読込みランクリストの生成
       readRankList.clear();

       cio_Domain domain;
       for(int k=0; k<3; k++ ) {
         domain.GlobalOrigin[k] = STG.dfi_Domain->GlobalOrigin[k];
         domain.GlobalRegion[k] = STG.dfi_Domain->GlobalRegion[k];
         domain.GlobalVoxel[k]  = STG.dfi_Domain->GlobalVoxel[k];
         domain.GlobalDivision[k]  = STG.dfi_Domain->GlobalDivision[k];
       }
       domain.ActiveSubdomainFile = STG.dfi_Domain->ActiveSubdomainFile;

       ret=STG.dfi_Process->CheckReadRank(domain,
           (const int *)STG.m_GRankInfo[j].HeadIndex,
           (const int *)STG.m_GRankInfo[j].TailIndex,readflag,readRankList);

       if( ret != CIO::E_CIO_SUCCESS ) return 0;

       //ファイルのコピー
       //printf("  ---- i=%d  j=%d  STG.FileCopy(() Start\n",i,j );
       //fflush(stdout);

       STG.FileCopy(readRankList,STG.m_GRankInfo[j].RankID);

       //printf("  ---- i=%d  j=%d  STG.FileCopy(() End\n",i,j );
       //fflush(stdout);

    }

    //dfiファイル出力
    //printf("  ---- i=%d STG.OutputDFI(() Start\n",i );
    //fflush(stdout);

    if( !STG.OutputDFI(STG.m_dfi_fname[i], rankMap) ) {
      printf("EEROR OutputDFI()\n");
      return 0;
    }

    //printf("  ---- i=%d STG.OutputDFI(() End\n",i );
    //fflush(stdout);
  }

  printf("####### normal end ######\n");

  return 1;
}
コード例 #29
0
ファイル: kernel.hpp プロジェクト: sajis997/compute
    /// \internal_
    ///
    /// Overload for image samplers: extracts the raw cl_sampler handle
    /// and passes its size and address to the generic setter.
    void set_arg(size_t index, const image_sampler &sampler)
    {
        const cl_sampler raw = cl_sampler(sampler);
        set_arg(index, sizeof raw, static_cast<const void *>(&raw));
    }
コード例 #30
0
/* Wraps a C int in a Python integer object and stores it at argument
 * position `pos` via set_arg().
 * NOTE(review): PyInt_FromLong (Python 2 C API) returns a new reference
 * and may return NULL on allocation failure; whether set_arg() takes
 * ownership (and handles NULL) is not visible here -- confirm, otherwise
 * this leaks one reference per call. */
static void set_int_arg(int val, int pos)
{
	
	set_arg(PyInt_FromLong((long)val), pos);
}