/* Equality of self and other in view of numerical array. i.e., both arrays
   have same shape and corresponding elements are equal.
   @overload == other
   @param [Object] other
   @return [Boolean] true if self and other is equal. */
VALUE
na_equal(VALUE self, volatile VALUE other)
{
    volatile VALUE vbool;  // volatile: keep intermediate visible to conservative GC
    narray_t *na1, *na2;
    int i;

    GetNArray(self,na1);

    // Coerce a non-NArray operand into self's class before comparing.
    if (!rb_obj_is_kind_of(other,cNArray)) {
        other = rb_funcall(CLASS_OF(self), rb_intern("cast"), 1, other);
    }
    GetNArray(other,na2);

    // Different rank, or any differing dimension length, means not equal.
    if (na1->ndim != na2->ndim) {
        return Qfalse;
    }
    for (i=0; i<na1->ndim; i++) {
        if (na1->shape[i] != na2->shape[i]) {
            return Qfalse;
        }
    }

    // Element-wise "eq" yields a boolean array; arrays are equal iff it has
    // no false entries. count_false returns an Integer, so comparing the
    // VALUE against INT2FIX(0) tests for exactly zero.
    vbool = rb_funcall(self, rb_intern("eq"), 1, other);
    return (rb_funcall(vbool, rb_intern("count_false"), 0)==INT2FIX(0)) ? Qtrue : Qfalse;
}
/* @overload initialize( inputs, targets )
 * Creates a new dataset from example data.
 * @param [NArray<sfloat>] inputs the input examples that a nn_model will process
 * @param [NArray<sfloat>] targets known outputs that can be used to train or assess a nn_model
 * @return [RuNeNe::DataSet] new dataset
 */
VALUE dataset_class_initialize( VALUE self, VALUE rv_inputs, VALUE rv_targets ) {
  volatile VALUE val_inputs;   // volatile: protect cast results from GC
  volatile VALUE val_targets;
  struct NARRAY *na_inputs;
  struct NARRAY *na_targets;
  DataSet *dataset = get_dataset_struct( self );

  // Coerce both arguments to single-precision float NArrays.
  val_inputs = na_cast_object( rv_inputs, NA_SFLOAT );
  GetNArray( val_inputs, na_inputs );

  val_targets = na_cast_object( rv_targets, NA_SFLOAT );
  GetNArray( val_targets, na_targets );

  // Rank must be at least 2: item data dimensions plus the item-index dimension.
  if ( na_inputs->rank < 2 ) {
    rb_raise( rb_eArgError, "Inputs rank should be at least 2, but got %d", na_inputs->rank );
  }

  if ( na_targets->rank < 2 ) {
    rb_raise( rb_eArgError, "Targets rank should be at least 2, but got %d", na_targets->rank );
  }

  // The last dimension indexes examples, so the example counts must match.
  if ( na_inputs->shape[ na_inputs->rank - 1 ] != na_targets->shape[ na_targets->rank - 1 ] ) {
    rb_raise( rb_eArgError, "Number of input items %d not same as target items %d",
        na_inputs->shape[ na_inputs->rank - 1 ], na_targets->shape[ na_targets->rank - 1 ] );
  }

  dataset__init_from_narray( dataset, val_inputs, val_targets );

  return self;
}
/* @overload run( input )
 * Runs nn_model forward and generates a result
 * @param [NArray<sfloat>] input single input vector
 * @return [NArray<sfloat>] output of nn_model
 */
VALUE nn_model_rbobject__run( VALUE self, VALUE rv_input ) {
  NNModel *nn_model = get_nn_model_struct( self );
  int out_shape[1] = { nn_model->num_outputs };
  struct NARRAY *na_input;
  volatile VALUE val_input = na_cast_object(rv_input, NA_SFLOAT);  // volatile: GC protection
  GetNArray( val_input, na_input );

  // Shouldn't happen, but we don't want a segfault
  if ( nn_model->num_layers < 1 ) {
    return Qnil;
  }

  // Input is treated as a flat vector; only total element count is checked.
  if ( na_input->total != nn_model->num_inputs ) {
    rb_raise( rb_eArgError, "Input array must be size %d, but it was size %d",
        nn_model->num_inputs, na_input->total );
  }

  struct NARRAY *na_output;
  volatile VALUE val_output = na_make_object( NA_SFLOAT, 1, out_shape, cNArray );
  GetNArray( val_output, na_output );

  nn_model__run( nn_model, (float*) na_input->ptr );

  // The final layer's activations are the model output; copy them into the
  // freshly-allocated result array.
  memcpy( (float*) na_output->ptr, nn_model->activations[nn_model->num_layers-1],
      nn_model->num_outputs * sizeof(float) );

  return val_output;
}
/* Initialises a GSL spline object from x and y sample data.
 *
 * Each of xxa/yya may be a Ruby Array (copied into a temporary gsl_vector),
 * a GSL::Vector, or (when compiled with NArray support) an NArray of doubles.
 * The element count is taken from xxa; yya is assumed to hold at least as
 * many elements (NOTE(review): not verified for the NArray/Array y paths).
 *
 * @param obj  wrapped rb_gsl_spline
 * @param xxa  x sample points
 * @param yya  y sample points
 * @return obj
 */
static VALUE rb_gsl_spline_init(VALUE obj, VALUE xxa, VALUE yya)
{
  rb_gsl_spline *sp = NULL;
  gsl_spline *p = NULL;
  gsl_vector *xa = NULL, *ya = NULL;
  size_t i, size;
  int flagx = 0, flagy = 0;   /* set when xa/ya were allocated here and must be freed */
  double *ptr1 = NULL, *ptr2 = NULL;
#ifdef HAVE_NARRAY_H
  struct NARRAY *nax = NULL, *nay = NULL;
#endif
  Data_Get_Struct(obj, rb_gsl_spline, sp);
  p = sp->s;
  if (TYPE(xxa) == T_ARRAY) {
    size = RARRAY_LEN(xxa);
    xa = gsl_vector_alloc(size);
    for (i = 0; i < size; i++) gsl_vector_set(xa, i, NUM2DBL(rb_ary_entry(xxa, i)));
    ptr1 = xa->data;
    flagx = 1;
  } else if (VECTOR_P(xxa)) {
    Data_Get_Struct(xxa, gsl_vector, xa);
    size = xa->size;
    ptr1 = xa->data;
#ifdef HAVE_NARRAY_H
  } else if (NA_IsNArray(xxa)) {
    GetNArray(xxa, nax);
    size = nax->total;
    ptr1 = (double *) nax->ptr;
#endif
  } else {
    rb_raise(rb_eTypeError, "not a vector");
  }
  if (TYPE(yya) == T_ARRAY) {
    ya = gsl_vector_alloc(size);
    for (i = 0; i < size; i++) gsl_vector_set(ya, i, NUM2DBL(rb_ary_entry(yya, i)));
    ptr2 = ya->data;
    flagy = 1;
#ifdef HAVE_NARRAY_H
  } else if (NA_IsNArray(yya)) {
    GetNArray(yya, nay);
    ptr2 = (double *) nay->ptr;
#endif
  } else if (VECTOR_P(yya)) {
    Data_Get_Struct(yya, gsl_vector, ya);
    ptr2 = ya->data;
  } else {
    /* BUG FIX: free the temporary x vector before raising, otherwise it
       leaks whenever xxa was a Ruby Array and yya has an invalid type. */
    if (flagx == 1) gsl_vector_free(xa);
    rb_raise(rb_eTypeError, "not a vector");
  }
  gsl_spline_init(p, ptr1, ptr2, size);
  if (flagx == 1) gsl_vector_free(xa);
  if (flagy == 1) gsl_vector_free(ya);
  return obj;
}
/* Copies NArray-specific state flags from src to dst, and propagates the
 * user-defined Ruby object flag bits (FL_USER1..FL_USER7) on the wrapper
 * objects themselves. */
void na_copy_flags(VALUE src, VALUE dst)
{
    narray_t *na_src, *na_dst;
    int i;

    GetNArray(src, na_src);
    GetNArray(dst, na_dst);

    /* Mirror both internal flag slots. */
    for (i = 0; i < 2; i++) {
        na_dst->flag[i] = na_src->flag[i];
    }

    /* OR in the user flag bits carried on the Ruby object header. */
    RBASIC(dst)->flags |= RBASIC(src)->flags &
        (FL_USER1|FL_USER2|FL_USER3|FL_USER4|FL_USER5|FL_USER6|FL_USER7);
}
/* Deep-copies one MBGDLayer into another: scalar hyper-parameters are copied
 * directly, the wrapped gradient-descent object is cloned into a fresh Ruby
 * wrapper of the matching class, and the three gradient NArrays are cloned
 * with their raw float pointers re-captured from the copies.
 *
 * @param mbgd_layer_copy destination struct (assumed already allocated)
 * @param mbgd_layer_orig source struct
 */
void mbgd_layer__deep_copy( MBGDLayer *mbgd_layer_copy, MBGDLayer *mbgd_layer_orig ) {
  struct NARRAY *narr;
  GradientDescent_SGD * gd_sgd;
  GradientDescent_NAG * gd_nag;
  GradientDescent_RMSProp * gd_rmsprop;

  mbgd_layer_copy->num_inputs = mbgd_layer_orig->num_inputs;
  mbgd_layer_copy->num_outputs = mbgd_layer_orig->num_outputs;
  mbgd_layer_copy->learning_rate = mbgd_layer_orig->learning_rate;
  mbgd_layer_copy->gradient_descent_type = mbgd_layer_orig->gradient_descent_type;

  // Clone the gradient-descent helper object. Each branch unwraps the
  // original C struct, clones it, and wraps the clone in a new Ruby object
  // with the type's mark/free functions. NOTE(review): no default case, so
  // an unknown type leaves gradient_descent unset in the copy.
  switch ( mbgd_layer_copy->gradient_descent_type ) {
    case GD_TYPE_SGD:
      Data_Get_Struct( mbgd_layer_orig->gradient_descent, GradientDescent_SGD, gd_sgd );
      mbgd_layer_copy->gradient_descent = Data_Wrap_Struct( RuNeNe_GradientDescent_SGD,
          gd_sgd__gc_mark, gd_sgd__destroy, gd_sgd__clone( gd_sgd ) );
      break;
    case GD_TYPE_NAG:
      Data_Get_Struct( mbgd_layer_orig->gradient_descent, GradientDescent_NAG, gd_nag );
      mbgd_layer_copy->gradient_descent = Data_Wrap_Struct( RuNeNe_GradientDescent_NAG,
          gd_nag__gc_mark, gd_nag__destroy, gd_nag__clone( gd_nag ) );
      break;
    case GD_TYPE_RMSPROP:
      Data_Get_Struct( mbgd_layer_orig->gradient_descent, GradientDescent_RMSProp, gd_rmsprop );
      mbgd_layer_copy->gradient_descent = Data_Wrap_Struct( RuNeNe_GradientDescent_RMSProp,
          gd_rmsprop__gc_mark, gd_rmsprop__destroy, gd_rmsprop__clone( gd_rmsprop ) );
      break;
  }

  mbgd_layer_copy->max_norm = mbgd_layer_orig->max_norm;
  mbgd_layer_copy->weight_decay = mbgd_layer_orig->weight_decay;

  // Clone each gradient NArray, then cache its raw float buffer so the
  // C-side code can write gradients without going through Ruby.
  mbgd_layer_copy->narr_de_dz = na_clone( mbgd_layer_orig->narr_de_dz );
  GetNArray( mbgd_layer_copy->narr_de_dz, narr );
  mbgd_layer_copy->de_dz = (float *) narr->ptr;

  mbgd_layer_copy->narr_de_da = na_clone( mbgd_layer_orig->narr_de_da );
  GetNArray( mbgd_layer_copy->narr_de_da, narr );
  mbgd_layer_copy->de_da = (float *) narr->ptr;

  mbgd_layer_copy->narr_de_dw = na_clone( mbgd_layer_orig->narr_de_dw );
  GetNArray( mbgd_layer_copy->narr_de_dw, narr );
  mbgd_layer_copy->de_dw = (float *) narr->ptr;

  return;
}
// // NArray#load_from_shmemutex(shmemutex,timeout=-1) // VALUE rb_NArray_load_from_shmemutex(int argc,VALUE *argv,VALUE self) { VALUE shm,timeout; struct NARRAY *n_na; ShMemutex *ptr; long to=-1; size_t sz; if(rb_scan_args(argc,argv,"11",&shm,&timeout)==2) to=NUM2LONG(timeout); if(rb_obj_is_kind_of(shm, rb_const_get(rb_cObject, rb_intern("ShMemutex")))!=Qtrue) rb_raise(rb_eTypeError,"1st. argument must be ShMemutex object."); Data_Get_Struct(shm,ShMemutex,ptr); GetNArray(self,n_na); sz=n_na->total*na_sizeof[n_na->type]; if(!sz) rb_raise(rb_eTypeError,"NArray size is too small"); ptr->read(n_na->ptr,sz,to); return self; }
// // ShMemutex::write(src,timeout=-1) // VALUE rb_ShMemutex_write(int argc,VALUE *argv,VALUE self) { VALUE src,timeout; struct NARRAY *n_na; ShMemutex *ptr; long to=-1; void *buf; size_t sz; if(rb_scan_args(argc,argv,"11",&src,&timeout)==2) to=NUM2LONG(timeout); Data_Get_Struct(self,ShMemutex,ptr); if(IsNArray(src)){ GetNArray(src,n_na); sz=n_na->total*na_sizeof[n_na->type]; buf=(void *)n_na->ptr; } else{ StringValue(src); sz=RSTRING(src)->len+1; buf=RSTRING(src)->ptr; } if((!sz)||(!buf)) return INT2FIX(0); return INT2NUM(ptr->write(buf,sz,to)); }
/* @overload init_weights( mult = 1.0 )
 * Initialises weights in all layers.
 * @param [Float] mult optional size factor
 * @return [RuNeNe::NNModel] self
 */
VALUE nn_model_rbobject__init_weights( int argc, VALUE* argv, VALUE self ) {
  NNModel *nn_model = get_nn_model_struct( self );
  VALUE rv_mult;
  Layer_FF *layer_ff;
  float m = 1.0;
  int i, j, t;
  struct NARRAY *narr;

  rb_scan_args( argc, argv, "01", &rv_mult );
  if ( ! NIL_P( rv_mult ) ) {
    m = NUM2FLT( rv_mult );
  }

  for ( i = 0; i < nn_model->num_layers; i++ ) {
    // TODO: This only works for Layer_FF layers, we need a more flexible system
    Data_Get_Struct( nn_model->layers[i], Layer_FF, layer_ff );
    layer_ff__init_weights( layer_ff );
    // Scale all weights by the multiplier; a multiplier of exactly 0 is
    // skipped (it would zero everything, so the default init is kept).
    if ( m != 0 ) {
      GetNArray( layer_ff->narr_weights, narr );
      t = narr->total;
      for ( j = 0; j < t; j++ ) {
        layer_ff->weights[j] *= m;
      }
    }
  }

  return self;
}
/* Scans slice arguments for the special markers `false`/:rest/:~ (rest of
 * dimensions) and :new/:- (insert new dimension), normalising them in-place
 * in argv, then delegates to check_index_count to compute the result
 * dimension count.
 *
 * @param self  the NArray being sliced
 * @param argc  number of index arguments
 * @param argv  index arguments; rest markers rewritten to Qfalse, new
 *              markers rewritten to sym_new
 * @return result dimension count from check_index_count
 */
static int na_get_result_dimension_for_slice(VALUE self, int argc, VALUE *argv)
{
    int i;
    int count_new=0;
    int count_rest=0;
    narray_t *na;
    VALUE a;

    GetNArray(self,na);
    if (na->size == 0) {
        rb_raise(nary_eShapeError, "cannot get element of empty array");
    }

    for (i=0; i<argc; i++) {
        a = argv[i];
        switch(TYPE(a)) {
        case T_FALSE:        // deliberate fallthrough: both types share the
        case T_SYMBOL:       // same marker tests below
            if (a==sym_rest || a==sym_tilde || a==Qfalse) {
                argv[i] = Qfalse;   // canonicalise all rest markers to false
                count_rest++;
            } else if (a==sym_new || a==sym_minus) {
                argv[i] = sym_new;  // canonicalise all new-dim markers
                count_new++;
            }
        }
    }

    return check_index_count(argc, na->ndim, count_new, count_rest);
}
/* Prints internal NArray debug information (class, type, flags, size, ndim,
 * shape) to stdout, then delegates to a type-specific printer for the
 * data/filemap/view details. Debugging aid only.
 *
 * NOTE(review): `self` is printed with a size_t format specifier without a
 * cast — assumes VALUE and size_t have the same width; confirm on the
 * supported platforms.
 *
 * @return [nil]
 */
VALUE rb_narray_debug_info(VALUE self)
{
    int i;
    narray_t *na;
    GetNArray(self,na);

    printf("%s:\n",rb_class2name(CLASS_OF(self)));
    printf("  id     = 0x%"SZF"x\n", self);
    printf("  type   = %d\n", na->type);
    printf("  flag   = [%d,%d]\n", na->flag[0], na->flag[1]);
    printf("  size   = %"SZF"d\n", na->size);
    printf("  ndim   = %d\n", na->ndim);
    printf("  shape  = 0x%"SZF"x\n", (size_t)na->shape);
    if (na->shape) {
        printf("  shape  = [");
        for (i=0;i<na->ndim;i++)
            printf(" %"SZF"d", na->shape[i]);
        printf(" ]\n");
    }

    // Delegate to the representation-specific dump.
    switch(na->type) {
    case NARRAY_DATA_T:
    case NARRAY_FILEMAP_T:
        rb_narray_debug_info_nadata(self);
        break;
    case NARRAY_VIEW_T:
        rb_narray_debug_info_naview(self);
        break;
    }
    return Qnil;
}
/* Configures the shape of an NArray wrapper object.
 * Unwraps the underlying narray_t and forwards to na_setup_shape.
 *
 * @param self  the NArray Ruby object
 * @param ndim  number of dimensions
 * @param shape array of ndim dimension lengths
 */
void na_setup(VALUE self, int ndim, size_t *shape)
{
    narray_t *narr;

    GetNArray(self, narr);
    na_setup_shape(narr, ndim, shape);
}
/* Casts a (possibly nested) Ruby Array into a new struct-type NArray of
 * class `klass`. The composition helper determines the resulting shape;
 * the ndloop then copies each Ruby element into the allocated records via
 * per-member views.
 *
 * @param klass struct NArray subclass to instantiate
 * @param rary  source Ruby Array
 * @return new NArray of klass
 */
static VALUE nary_struct_cast_array(VALUE klass, VALUE rary)
{
    volatile VALUE vnc, nary;   // volatile: keep alive across allocations
    narray_t *na;
    na_compose_t *nc;
    VALUE opt;
    ndfunc_arg_in_t ain[3] = {{rb_cArray,0},{Qnil,0},{sym_option}};
    ndfunc_t ndf = { iter_nstruct_from_a, NO_LOOP, 3, 0, ain, 0 };

    // Derive the target shape from the nested array structure.
    vnc = na_ary_composition_for_struct(klass, rary);
    Data_Get_Struct(vnc, na_compose_t, nc);
    nary = rb_narray_new(klass, nc->ndim, nc->shape);
    GetNArray(nary,na);

    if (na->size>0) {
        // Member views give per-field access into each struct record.
        opt = nst_create_member_views(nary);
        rb_funcall(nary, rb_intern("allocate"), 0);
        na_ndloop_cast_rarray_to_narray2(&ndf, rary, nary, opt);
    }
    return nary;
}
/* Lazily allocates the backing buffer for a struct NArray.
 * For data arrays, allocates element_byte_size * size bytes on first use;
 * for views, forwards allocation to the underlying data object.
 *
 * @return self
 */
static VALUE nst_allocate(VALUE self)
{
    narray_t *na;
    char *ptr;
    VALUE velmsz;

    GetNArray(self,na);

    switch(NA_TYPE(na)) {
    case NARRAY_DATA_T:
        ptr = NA_DATA_PTR(na);
        // Only allocate when not already allocated and non-empty.
        if (na->size > 0 && ptr == NULL) {
            // Byte size per record comes from the class constant.
            velmsz = rb_const_get(CLASS_OF(self), rb_intern("element_byte_size"));
            ptr = xmalloc(NUM2SIZET(velmsz) * na->size);
            NA_DATA_PTR(na) = ptr;
        }
        break;
    case NARRAY_VIEW_T:
        // Delegate: the view's storage lives in the referenced data object.
        rb_funcall(NA_VIEW_DATA(na), rb_intern("allocate"), 0);
        break;
    case NARRAY_FILEMAP_T:
        //ptr = ((narray_filemap_t*)na)->ptr;
        // to be implemented -- deliberately falls through to rb_bug until then
    default:
        rb_bug("invalid narray type : %d",NA_TYPE(na));
    }
    return self;
}
/* Tests whether `item` is acceptable as a 1-d member value of length `size`:
 * either a Ruby Array of exactly `size` valid elements (each passing
 * check_array), or a 1-d NArray with `size` elements.
 *
 * @return 1 if compatible, 0 otherwise
 */
static size_t check_array_1d(VALUE item, size_t size)
{
    narray_t *na;
    size_t idx, n;

    if (TYPE(item) == T_ARRAY) {
        n = RARRAY_LEN(item);
        if (n != size) {
            return 0;
        }
        // Every element must itself be a valid scalar/array member.
        for (idx = 0; idx < n; idx++) {
            if (!check_array(RARRAY_AREF(item, idx))) {
                return 0;
            }
        }
        return 1;
    }

    if (!RTEST(rb_obj_is_kind_of(item, cNArray))) {
        return 0;
    }
    GetNArray(item, na);
    return (na->ndim == 1 && na->size == size) ? 1 : 0;
}
/* method: ndim() -- returns the number of dimensions (rank) of the array */
static VALUE
na_ndim(VALUE self)
{
    narray_t *na;
    GetNArray(self,na);
    return INT2NUM(na->ndim);
}
/* Converts a Ruby Array of dimension sizes into the internal shape buffer.
 * If `self` is a column-major NArray the dimensions are written in reverse
 * order; otherwise in the given order. Raises TypeError for non-Integer
 * entries and ArgError for negative sizes.
 *
 * @param self  NArray whose layout flag decides the order, or a false value
 *              (then row-major order is assumed)
 * @param ary   Ruby Array of non-negative Integers
 * @param shape output buffer of at least RARRAY_LEN(ary) entries
 */
void
na_array_to_internal_shape(VALUE self, VALUE ary, size_t *shape)
{
    size_t i, n, c, s;
    VALUE v;
    narray_t *na;
    int flag = 0;

    n = RARRAY_LEN(ary);

    if (RTEST(self)) {
        GetNArray(self, na);
        flag = TEST_COLUMN_MAJOR(na);
    }
    if (flag) {
        // Column-major: fill from the end. s = -1 wraps in size_t, giving a
        // decrementing cursor — intentional unsigned arithmetic.
        c = n-1;
        s = -1;
    } else {
        c = 0;
        s = 1;
    }
    for (i=0; i<n; i++) {
        v = RARRAY_AREF(ary,i);
        if (!FIXNUM_P(v) && !rb_obj_is_kind_of(v, rb_cInteger)) {
            rb_raise(rb_eTypeError, "array size must be Integer");
        }
        // Use Ruby's own comparison so Bignums are handled too.
        if (RTEST(rb_funcall(v, rb_intern("<"), 1, INT2FIX(0)))) {
            rb_raise(rb_eArgError,"size must be non-negative");
        }
        shape[c] = NUM2SIZE(v);
        c += s;
    }
}
/* Returns the raw data pointer of an NArray for reading.
 * Plain data arrays expose their buffer directly; other representations
 * (views, filemaps) go through na_get_pointer. Raises if a non-empty
 * array has no allocated buffer.
 *
 * @return pointer to the first byte of data (may be NULL for empty arrays)
 */
char *
na_get_pointer_for_read(VALUE self)
{
    narray_t *na;
    char *ptr;

    GetNArray(self, na);

    ptr = (NA_TYPE(na) == NARRAY_DATA_T) ? NA_DATA_PTR(na)
                                         : na_get_pointer(self);

    if (NA_SIZE(na) > 0 && ptr == NULL) {
        rb_raise(rb_eRuntimeError, "cannot read unallocated NArray");
    }
    return ptr;
}
/* method: size() -- returns the total number of elements */
static VALUE
na_size(VALUE self)
{
    narray_t *na;
    GetNArray(self,na);
    return SIZE2NUM(na->size);
}
/* Returns the raw data pointer of an NArray for writing.
 * Refuses frozen arrays. For plain data arrays the buffer is allocated on
 * demand (via the Ruby-level `allocate` method); for other representations
 * an unallocated non-empty buffer is an error.
 *
 * @return pointer to the first writable byte (may be NULL for empty arrays)
 */
char *
na_get_pointer_for_write(VALUE self)
{
    narray_t *na;
    char *ptr;

    GetNArray(self, na);

    if (OBJ_FROZEN(self)) {
        rb_raise(rb_eRuntimeError, "cannot write to frozen NArray.");
    }

    if (NA_TYPE(na) != NARRAY_DATA_T) {
        // Views / filemaps resolve through the generic pointer lookup.
        ptr = na_get_pointer(self);
        if (NA_SIZE(na) > 0 && ptr == NULL) {
            rb_raise(rb_eRuntimeError, "cannot write to unallocated NArray");
        }
        return ptr;
    }

    ptr = NA_DATA_PTR(na);
    if (na->size > 0 && ptr == NULL) {
        // Lazily allocate the backing buffer, then re-read the pointer.
        rb_funcall(self, id_allocate, 0);
        ptr = NA_DATA_PTR(na);
    }
    return ptr;
}
/* @overload from_weights( weights, transfer_label = :sigmoid )
 * Creates a new layer using the supplied weights array, which must be rank 2.
 * The inputs and bias are taken from the first dimension, and each output is assigned
 * from the second dimension. For example an array with shape [5,3] has 4 inputs and
 * 3 outputs.
 * @param [NArray] weights
 * @param [Symbol] transfer_label type of transfer function to use.
 * @return [RuNeNe::Layer::FeedForward] new layer using supplied weights.
 */
VALUE layer_ff_class_from_weights( int argc, VALUE* argv, VALUE self ) {
  volatile VALUE weights_in, tfn_type;   // volatile: GC protection
  struct NARRAY *na_weights;
  volatile VALUE val_weights;
  int i, o;

  rb_scan_args( argc, argv, "11", &weights_in, &tfn_type );

  val_weights = na_cast_object(weights_in, NA_SFLOAT);
  GetNArray( val_weights, na_weights );

  if ( na_weights->rank != 2 ) {
    rb_raise( rb_eArgError, "Weights rank should be 2, but got %d", na_weights->rank );
  }

  // First dimension holds inputs plus one bias entry, hence the -1.
  i = na_weights->shape[0] - 1;
  if ( i < 1 ) {
    rb_raise( rb_eArgError, "Input size %d is less than minimum of 1", i );
  }

  o = na_weights->shape[1];
  if ( o < 1 ) {
    rb_raise( rb_eArgError, "Output size %d is less than minimum of 1", o );
  }

  return layer_ff_new_ruby_object_from_weights( val_weights, symbol_to_transfer_type( tfn_type ) );
}
/* Parses a 1-d NArray used as an index argument into the index-arg struct.
 * The NArray is cast/stored into an Index-typed array; each entry is then
 * range-checked against `size` and copied into a freshly allocated idx
 * buffer owned by `q`.
 *
 * @param a        index NArray (must be 1-d, else IndexError)
 * @param orig_dim dimension this index applies to (for error reporting)
 * @param size     length of that dimension, for range checking
 * @param q        output index-argument record; idx/n/beg/step etc. filled in
 */
static void
na_parse_narray_index(VALUE a, int orig_dim, ssize_t size, na_index_arg_t *q)
{
    VALUE idx;
    narray_t *na;
    narray_data_t *nidx;
    size_t k, n;
    ssize_t *nidxp;

    GetNArray(a,na);
    if (NA_NDIM(na) != 1) {
        rb_raise(rb_eIndexError, "should be 1-d NArray");
    }
    n = NA_SIZE(na);
    // Convert the argument into the canonical Index dtype.
    idx = nary_new(cIndex,1,&n);
    na_store(idx,a);
    GetNArrayData(idx,nidx);
    nidxp   = (ssize_t*)nidx->ptr;
    q->idx  = ALLOC_N(size_t, n);
    // Normalise each entry (handles negative indices, raises if out of range).
    for (k=0; k<n; k++) {
        q->idx[k] = na_range_check(nidxp[k], size, orig_dim);
    }
    q->n    = n;
    q->beg  = 0;
    q->step = 1;
    q->reduce = 0;
    q->orig_dim = orig_dim;
}
/* Checks whether a Ruby value is structurally compatible with the struct
 * NArray class `nst`: either it is already an instance of the same struct
 * class, or it is an Array whose entries match each member definition's
 * rank and shape (scalar, 1-d of matching size, or matching multi-d shape).
 *
 * @param nst struct NArray class
 * @param ary candidate value
 * @return Qtrue / Qfalse
 */
VALUE
nst_check_compatibility(VALUE nst, VALUE ary)
{
    VALUE defs, def, type, item;
    long len, i;
    narray_t *nt;

    if (TYPE(ary) != T_ARRAY) {
        if (nst==CLASS_OF(ary)) { // same Struct
            return Qtrue;
        }
        return Qfalse;
    }
    defs = nst_definitions(nst);
    len = RARRAY_LEN(defs);

    // One entry per struct member is required.
    if (len != RARRAY_LEN(ary)) {
        return Qfalse;
    }

    for (i=0; i<len; i++) {
        def  = RARRAY_AREF(defs,i);
        type = RARRAY_AREF(def,1);   // member's type template NArray
        GetNArray(type,nt);
        item = RARRAY_AREF(ary,i);
        if (nt->ndim == 0) {
            // Scalar member: the item must NOT itself be an array.
            if (check_array(item)) {
                return Qfalse;
            }
        } else if (nt->ndim == 1) {
            if (!check_array_1d(item, nt->size)) {
                return Qfalse;
            }
        } else { // multi-dimension member
            volatile VALUE vnc;
            na_compose_t *nc;
            int j;
            // Compose the item's shape and compare dimension by dimension.
            vnc = na_ary_composition(item);
            Data_Get_Struct(vnc, na_compose_t, nc);
            if (nt->ndim != nc->ndim) {
                return Qfalse;
            }
            for (j=0; j<nc->ndim; j++) {
                if (nc->shape[j] != nt->shape[j]) {
                    return Qfalse;
                }
            }
            // NOTE(review): returns after the first multi-d member, so any
            // later members are not checked — confirm this is intended.
            return Qtrue;
        }
    }
    return Qtrue;
}
/* Multi-dimensional element reference ([] implementation helper).
 * Normalises the argument list, handles the special single-NArray-index
 * case (multi-dim index arrays are flattened and a result store of the
 * index's shape is prepared), then runs the protected aref under rb_ensure
 * so index-arg allocations are always released.
 *
 * @param keep_dim   non-zero to keep reduced dimensions
 * @param result_nd  expected result dimension count
 * @return the referenced element(s)
 */
static VALUE
na_aref_md(int argc, VALUE *argv, VALUE self, int keep_dim, int result_nd)
{
    VALUE args; // should be GC protected
    narray_t *na1;
    na_aref_md_data_t data;
    VALUE store = 0;
    VALUE idx;
    narray_t *nidx;

    GetNArray(self,na1);

    args = rb_ary_new4(argc,argv);

    if (argc == 1 && result_nd == 1) {
        idx = argv[0];
        // A Ruby Array index is first converted into an NArray.
        if (rb_obj_is_kind_of(idx, rb_cArray)) {
            idx = rb_apply(numo_cNArray,id_bracket,idx);
        }
        if (rb_obj_is_kind_of(idx, numo_cNArray)) {
            GetNArray(idx,nidx);
            if (NA_NDIM(nidx)>1) {
                // Result takes the shape of the index array; work on the
                // flattened index and reshape via `store` afterwards.
                store = nary_new(CLASS_OF(self),NA_NDIM(nidx),NA_SHAPE(nidx));
                idx = na_flatten(idx);
                RARRAY_ASET(args,0,idx);
            }
        }
        // flatten should be done only for narray-view with non-uniform stride.
        if (na1->ndim > 1) {
            self = na_flatten(self);
            GetNArray(self,na1);
        }
    }
    data.args = args;
    data.self = self;
    data.store = store;
    data.ndim = result_nd;
    data.q = na_allocate_index_args(result_nd);
    data.na1 = na1;
    data.keep_dim = keep_dim;

    // rb_ensure guarantees the index args are freed even if aref raises.
    return rb_ensure(na_aref_md_protected, (VALUE)&data, na_aref_md_ensure, (VALUE)&data);
}
/* Returns the address of the NArray's data buffer as a Ruby Integer.
 * On 32-bit pointer platforms the pointer is widened via unsigned long to
 * avoid sign extension when converting to unsigned long long. */
VALUE na_address(VALUE self)
{
    struct NARRAY *na;
    void *data;
    unsigned long long int addr;

    GetNArray(self, na);
    data = na->ptr;

    if (sizeof(data) == 4) {
        addr = (unsigned long long int)(unsigned long int)data;
    } else {
        addr = (unsigned long long int)data;
    }
    return ULL2NUM(addr);
}
/* GC mark callback for an NArray that references another NArray's data:
 * marks the referenced wrapper, and if it holds Ruby objects (NA_ROBJ),
 * marks each contained object too. */
static void na_mark_ref(struct NARRAY *ary)
{
    struct NARRAY *referenced;

    rb_gc_mark(ary->ref);

    GetNArray(ary->ref, referenced);
    if (referenced->type == NA_ROBJ) {
        na_mark_obj(referenced);
    }
}
/* Converts an OpenCV IplImage into a new Rim::Image NArray.
 * The NArray element type is chosen from the image depth; pixel data is
 * copied in one memcpy when the source rows are contiguous, otherwise
 * row by row (skipping OpenCV's per-row padding in widthStep).
 *
 * @param img source image
 * @return new Rim::Image object, or Qnil for an unsupported depth
 */
VALUE rb_rim_ipl2image(IplImage *img)
{
  struct NARRAY *n_na;
  ID func;

  // Map IplImage depth to the Rim::Image constructor method name.
  switch(img->depth){
  case IPL_DEPTH_8U:
  case IPL_DEPTH_8S:  func=rb_intern("byte");   break;
  case IPL_DEPTH_16U:
  case IPL_DEPTH_16S: func=rb_intern("sint");   break;
  case IPL_DEPTH_32S: func=rb_intern("lint");   break;
  case IPL_DEPTH_32F: func=rb_intern("sfloat"); break;
  case IPL_DEPTH_64F: func=rb_intern("dfloat"); break;
  default:
    // BUG FIX: was `return NULL;` — NULL is a pointer constant and not a
    // valid VALUE; return Qnil to signal "no conversion" to Ruby callers.
    return Qnil;
  }

  VALUE oRimImage = rb_funcall(
      rb_const_get(rb_const_get(rb_cObject,rb_intern("Rim")), rb_intern("Image")),
      func, 3,
      INT2FIX(img->nChannels), INT2FIX(img->width), INT2FIX(img->height));
  GetNArray(oRimImage,n_na);

  size_t lineWidth=img->nChannels*img->width*na_sizeof[n_na->type];
  size_t sz=n_na->total*na_sizeof[n_na->type];

  if((img->widthStep==lineWidth)&&(img->imageSize==sz)){
    // Rows are tightly packed: single bulk copy.
    memcpy(n_na->ptr,img->imageData,sz);
  }
  else{
    // Rows are padded: copy each row, advancing by widthStep in the source.
    char *src,*dst;
    int i;
    for(i=0,src=img->imageData,dst=n_na->ptr;
        i<img->height;
        i++,src+=img->widthStep,dst+=lineWidth)
      memcpy(dst,src,lineWidth);
  }
  return oRimImage;
}
void mbgd_layer__init( MBGDLayer *mbgd_layer, int num_inputs, int num_outputs ) { int i; int shape[2]; struct NARRAY *narr; float *narr_de_dz_ptr; float *narr_de_da_ptr; float *narr_de_dw_ptr; mbgd_layer->num_inputs = num_inputs; mbgd_layer->num_outputs = num_outputs; shape[0] = num_outputs; mbgd_layer->narr_de_dz = na_make_object( NA_SFLOAT, 1, shape, cNArray ); GetNArray( mbgd_layer->narr_de_dz, narr ); narr_de_dz_ptr = (float*) narr->ptr; for( i = 0; i < narr->total; i++ ) { narr_de_dz_ptr[i] = 0.0; } mbgd_layer->de_dz = (float *) narr->ptr; shape[0] = num_inputs; mbgd_layer->narr_de_da = na_make_object( NA_SFLOAT, 1, shape, cNArray ); GetNArray( mbgd_layer->narr_de_da, narr ); narr_de_da_ptr = (float*) narr->ptr; for( i = 0; i < narr->total; i++ ) { narr_de_da_ptr[i] = 0.0; } mbgd_layer->de_da = (float *) narr->ptr; shape[0] = num_inputs + 1; shape[1] = num_outputs; mbgd_layer->narr_de_dw = na_make_object( NA_SFLOAT, 2, shape, cNArray ); GetNArray( mbgd_layer->narr_de_dw, narr ); narr_de_dw_ptr = (float*) narr->ptr; for( i = 0; i < narr->total; i++ ) { narr_de_dw_ptr[i] = 0.0; } mbgd_layer->de_dw = (float *) narr->ptr; return; }
/* Replaces the contents of self with the contents of other narray.
   Used in dup and clone method.
   @overload initialize_copy(other)
   @param [Numo::NArray] other
   @return [Numo::NArray] self
*/
static VALUE
na_initialize_copy(VALUE self, VALUE orig)
{
    narray_t *na_orig;

    GetNArray(orig, na_orig);

    // Take on the source's shape, then copy data and flags across.
    na_setup(self, NA_NDIM(na_orig), NA_SHAPE(na_orig));
    na_store(self, orig);
    na_copy_flags(orig, self);

    return self;
}
/*
 *  call-seq:
 *     narray.view => narray
 *
 *  Return view of NArray
 */
VALUE
na_make_view(VALUE self)
{
    int i, nd;
    size_t  j;
    size_t *idx1, *idx2;
    ssize_t stride;
    narray_t *na;
    narray_view_t *na1, *na2;
    volatile VALUE view;  // volatile: GC protection across allocations

    GetNArray(self,na);
    nd = na->ndim;

    view = na_s_allocate_view(CLASS_OF(self));

    na_copy_flags(self, view);
    GetNArrayView(view, na2);

    na_setup_shape((narray_t*)na2, nd, na->shape);
    na2->stridx = ALLOC_N(stridx_t,nd);

    switch(na->type) {
    case NARRAY_DATA_T:
    case NARRAY_FILEMAP_T:
        // Viewing concrete data: build contiguous (C-order) strides from
        // the innermost dimension outwards, starting at element size.
        stride = na_get_elmsz(self);
        for (i=nd; i--;) {
            SDX_SET_STRIDE(na2->stridx[i],stride);
            stride *= na->shape[i];
        }
        na2->offset = 0;
        na2->data = self;
        break;
    case NARRAY_VIEW_T:
        // Viewing a view: duplicate its stride/index info per dimension.
        GetNArrayView(self, na1);
        for (i=0; i<nd; i++) {
            if (SDX_IS_INDEX(na1->stridx[i])) {
                // Index vectors are deep-copied so the views don't share
                // mutable index storage.
                idx1 = SDX_GET_INDEX(na1->stridx[i]);
                idx2 = ALLOC_N(size_t,na1->base.shape[i]);
                for (j=0; j<na1->base.shape[i]; j++) {
                    idx2[j] = idx1[j];
                }
                SDX_SET_INDEX(na2->stridx[i],idx2);
            } else {
                // Plain strides are value-copied.
                na2->stridx[i] = na1->stridx[i];
            }
        }
        // Share the same underlying data object and offset.
        na2->offset = na1->offset;
        na2->data = na1->data;
        break;
    }

    return view;
}