// extern void TF_SetAttrIntList(TF_OperationDescription* desc,
//                               const char* attr_name, const int64_t* values,
//                               int num_values);
static PHP_METHOD(TensorFlow_OperationDescription, setAttrIntList)
{
    zend_string *name;
    zval *values;

    ZEND_PARSE_PARAMETERS_START(2, 2)
        Z_PARAM_STR(name)
        Z_PARAM_ARRAY(values)
    ZEND_PARSE_PARAMETERS_END();

    int64_t *tf_values = NULL;
    HashTable *values_table = Z_ARRVAL_P(values);
    int tf_num_values = zend_hash_num_elements(values_table); // element count of the PHP array

    if (tf_num_values > 0) {
        tf_values = (int64_t *)emalloc(sizeof(int64_t) * tf_num_values);

        HashPosition pos;
        zval *element;
        int index = 0;
        zend_hash_internal_pointer_reset_ex(values_table, &pos);
        while (zend_hash_has_more_elements_ex(values_table, &pos) == SUCCESS) {
            if (!(element = zend_hash_get_current_data_ex(values_table, &pos))) {
                efree(tf_values);
                zend_throw_exception(spl_ce_InvalidArgumentException,
                    "failed to read an element of values", 0);
                return;
            }
            if (zval_get_type(element) != IS_LONG) {
                efree(tf_values);
                zend_throw_exception(spl_ce_InvalidArgumentException,
                    "values must be an array of integers", 0);
                return;
            }
            // copy the PHP integer into the native buffer
            tf_values[index] = Z_LVAL_P(element);
            zend_hash_move_forward_ex(values_table, &pos);
            index++;
        }
    }

    t_tf_operation_description_object *intern = TF_OPERATION_DESCRIPTION_P_ZV(getThis());
    t_tf_operation_description *node = intern->ptr;
    TF_SetAttrIntList(node->src, name->val, tf_values, tf_num_values);

    // TF_SetAttrIntList copies the values, so the scratch buffer can be freed
    if (tf_values) {
        efree(tf_values);
    }
}
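For reference, the PHP method above is a thin wrapper over a single C-API call. A minimal standalone sketch of the same call follows, assuming a caller-supplied graph, input and status; the op type and node name here are illustrative, not part of the binding:

#include <tensorflow/c/c_api.h>

/* Hypothetical sketch: set int-list attributes directly via the TF C API. */
static TF_Operation *max_pool_demo(TF_Graph *graph, TF_Output in, TF_Status *status)
{
    const int64_t window[4]  = {1, 2, 2, 1};
    const int64_t strides[4] = {1, 2, 2, 1};
    TF_OperationDescription *desc = TF_NewOperation(graph, "MaxPool", "max_pool_demo");

    TF_AddInput(desc, in);
    TF_SetAttrType(desc, "T", TF_FLOAT);
    /* TF_SetAttrIntList copies the buffer, so stack arrays are fine here. */
    TF_SetAttrIntList(desc, "ksize", window, 4);
    TF_SetAttrIntList(desc, "strides", strides, 4);
    TF_SetAttrString(desc, "padding", "SAME", 4);
    return TF_FinishOperation(desc, status); /* NULL on error; check status */
}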
SCM tf_set_attr_int_list(SCM scm_description, SCM scm_name, SCM scm_values)
{
  struct tf_description_t *self = get_tf_description(scm_description);
  int num_values = scm_ilength(scm_values);
  int64_t *values = scm_gc_malloc(sizeof(int64_t) * num_values, "tf-set-attr-int-list");
  for (int i = 0; i < num_values; i++) {
    // convert each list element to a full 64-bit integer
    values[i] = scm_to_int64(scm_car(scm_values));
    scm_values = scm_cdr(scm_values);
  }
  char *name = scm_to_locale_string(scm_name);
  TF_SetAttrIntList(self->description, name, values, num_values);
  free(name);
  return SCM_UNDEFINED;
}
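The Guile binding above is presumably exported to Scheme with scm_c_define_gsubr; the init function name below is an assumption, but the registration call is the standard libguile API for a subr with three required arguments:

#include <libguile.h>

/* Hypothetical module init: expose the binding to Scheme. */
void init_tf_attr_bindings(void)
{
    scm_c_define_gsubr("tf-set-attr-int-list", 3, 0, 0, tf_set_attr_int_list);
}

From Scheme the call would then look like (tf-set-attr-int-list desc "strides" '(1 1 1 1)).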
static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_op,
                                    TF_Operation **cur_op, ConvolutionalParams *params,
                                    const int layer)
{
    TF_Operation *op;
    TF_OperationDescription *op_desc;
    TF_Output input;
    int64_t strides[] = {1, 1, 1, 1};
    TF_Tensor *tensor;
    int64_t dims[4];
    int dims_len;
    char name_buffer[NAME_BUFFER_SIZE];
    int32_t size;

    size = params->input_num * params->output_num * params->kernel_size * params->kernel_size;
    input.index = 0;

    // kernel constant, laid out as output_num x kernel_size x kernel_size x input_num
    snprintf(name_buffer, NAME_BUFFER_SIZE, "conv_kernel%d", layer);
    op_desc = TF_NewOperation(tf_model->graph, "Const", name_buffer);
    TF_SetAttrType(op_desc, "dtype", TF_FLOAT);
    dims[0] = params->output_num;
    dims[1] = params->kernel_size;
    dims[2] = params->kernel_size;
    dims[3] = params->input_num;
    dims_len = 4;
    tensor = TF_AllocateTensor(TF_FLOAT, dims, dims_len, size * sizeof(float));
    memcpy(TF_TensorData(tensor), params->kernel, size * sizeof(float));
    TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
    TF_DeleteTensor(tensor); // the attribute holds its own copy
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }
    op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }

    // transpose the kernel into the HWIO layout expected by Conv2D
    snprintf(name_buffer, NAME_BUFFER_SIZE, "transpose%d", layer);
    op_desc = TF_NewOperation(tf_model->graph, "Transpose", name_buffer);
    input.oper = op;
    TF_AddInput(op_desc, input);
    input.oper = transpose_op;
    TF_AddInput(op_desc, input);
    TF_SetAttrType(op_desc, "T", TF_FLOAT);
    TF_SetAttrType(op_desc, "Tperm", TF_INT32);
    op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }

    snprintf(name_buffer, NAME_BUFFER_SIZE, "conv2d%d", layer);
    op_desc = TF_NewOperation(tf_model->graph, "Conv2D", name_buffer);
    input.oper = *cur_op;
    TF_AddInput(op_desc, input);
    input.oper = op;
    TF_AddInput(op_desc, input);
    TF_SetAttrType(op_desc, "T", TF_FLOAT);
    TF_SetAttrIntList(op_desc, "strides", strides, 4);
    TF_SetAttrString(op_desc, "padding", "VALID", 5);
    *cur_op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }

    // bias constant with one value per output channel
    snprintf(name_buffer, NAME_BUFFER_SIZE, "conv_biases%d", layer);
    op_desc = TF_NewOperation(tf_model->graph, "Const", name_buffer);
    TF_SetAttrType(op_desc, "dtype", TF_FLOAT);
    dims[0] = params->output_num;
    dims_len = 1;
    tensor = TF_AllocateTensor(TF_FLOAT, dims, dims_len, params->output_num * sizeof(float));
    memcpy(TF_TensorData(tensor), params->biases, params->output_num * sizeof(float));
    TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
    TF_DeleteTensor(tensor); // the attribute holds its own copy
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }
    op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }

    snprintf(name_buffer, NAME_BUFFER_SIZE, "bias_add%d", layer);
    op_desc = TF_NewOperation(tf_model->graph, "BiasAdd", name_buffer);
    input.oper = *cur_op;
    TF_AddInput(op_desc, input);
    input.oper = op;
    TF_AddInput(op_desc, input);
    TF_SetAttrType(op_desc, "T", TF_FLOAT);
    *cur_op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }

    snprintf(name_buffer, NAME_BUFFER_SIZE, "activation%d", layer);
    switch (params->activation){
    case RELU:
        op_desc = TF_NewOperation(tf_model->graph, "Relu", name_buffer);
        break;
    case TANH:
        op_desc = TF_NewOperation(tf_model->graph, "Tanh", name_buffer);
        break;
    case SIGMOID:
        op_desc = TF_NewOperation(tf_model->graph, "Sigmoid", name_buffer);
        break;
    default:
        return DNN_ERROR;
    }
    input.oper = *cur_op;
    TF_AddInput(op_desc, input);
    TF_SetAttrType(op_desc, "T", TF_FLOAT);
    *cur_op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }

    return DNN_SUCCESS;
}
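add_conv_layer threads the current graph tail through the cur_op in/out parameter, so a network is built by calling it once per layer. It also assumes a file-scope NAME_BUFFER_SIZE macro (plausibly 256, matching the NAME_BUFF_SIZE used in add_conv_layers below) that this excerpt does not show. A hypothetical driver loop; tf_model, transpose_op, input_op, conv_params and layers_num are all assumed to be prepared by the caller and do not appear in this form in the excerpt:

/* Hypothetical sketch: build the whole network by chaining layers. */
static DNNReturnType build_network(TFModel *tf_model, TF_Operation *transpose_op,
                                   TF_Operation *input_op,
                                   ConvolutionalParams *conv_params, int layers_num)
{
    TF_Operation *cur_op = input_op;
    for (int layer = 0; layer < layers_num; ++layer) {
        /* cur_op is both input and output: each call appends
         * conv -> bias_add -> activation and advances the graph tail */
        if (add_conv_layer(tf_model, transpose_op, &cur_op,
                           &conv_params[layer], layer) != DNN_SUCCESS)
            return DNN_ERROR;
    }
    return DNN_SUCCESS; /* cur_op now names the model output op */
}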
static TF_Operation* add_conv_layers(TFModel *tf_model, const float **consts,
                                     const int64_t **consts_dims, const int *consts_dims_len,
                                     const char **activations,
                                     TF_Operation *input_op, int layers_num)
{
    int i;
    TF_OperationDescription *op_desc;
    TF_Operation *op;
    TF_Operation *transpose_op;
    TF_Output input;
    int64_t strides[] = {1, 1, 1, 1};
    int32_t *transpose_perm;
    TF_Tensor *tensor;
    int64_t transpose_perm_shape[] = {4};
#define NAME_BUFF_SIZE 256
    char name_buffer[NAME_BUFF_SIZE];

    // shared permutation constant: maps the OHWI kernel layout to the
    // HWIO layout expected by Conv2D
    op_desc = TF_NewOperation(tf_model->graph, "Const", "transpose_perm");
    TF_SetAttrType(op_desc, "dtype", TF_INT32);
    tensor = TF_AllocateTensor(TF_INT32, transpose_perm_shape, 1, 4 * sizeof(int32_t));
    transpose_perm = (int32_t *)TF_TensorData(tensor);
    transpose_perm[0] = 1;
    transpose_perm[1] = 2;
    transpose_perm[2] = 3;
    transpose_perm[3] = 0;
    TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
    TF_DeleteTensor(tensor); // the attribute holds its own copy
    if (TF_GetCode(tf_model->status) != TF_OK){
        return NULL;
    }
    transpose_op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return NULL;
    }

    input.index = 0;
    for (i = 0; i < layers_num; ++i){
        // kernels sit at even indices of consts[], biases at odd indices
        snprintf(name_buffer, NAME_BUFF_SIZE, "conv_kernel%d", i);
        op = add_const_op(tf_model, consts[i << 1], consts_dims[i << 1],
                          consts_dims_len[i << 1], name_buffer);
        if (TF_GetCode(tf_model->status) != TF_OK || op == NULL){
            return NULL;
        }

        snprintf(name_buffer, NAME_BUFF_SIZE, "transpose%d", i);
        op_desc = TF_NewOperation(tf_model->graph, "Transpose", name_buffer);
        input.oper = op;
        TF_AddInput(op_desc, input);
        input.oper = transpose_op;
        TF_AddInput(op_desc, input);
        TF_SetAttrType(op_desc, "T", TF_FLOAT);
        TF_SetAttrType(op_desc, "Tperm", TF_INT32);
        op = TF_FinishOperation(op_desc, tf_model->status);
        if (TF_GetCode(tf_model->status) != TF_OK){
            return NULL;
        }

        snprintf(name_buffer, NAME_BUFF_SIZE, "conv2d%d", i);
        op_desc = TF_NewOperation(tf_model->graph, "Conv2D", name_buffer);
        input.oper = input_op;
        TF_AddInput(op_desc, input);
        input.oper = op;
        TF_AddInput(op_desc, input);
        TF_SetAttrType(op_desc, "T", TF_FLOAT);
        TF_SetAttrIntList(op_desc, "strides", strides, 4);
        TF_SetAttrString(op_desc, "padding", "VALID", 5);
        input_op = TF_FinishOperation(op_desc, tf_model->status);
        if (TF_GetCode(tf_model->status) != TF_OK){
            return NULL;
        }

        snprintf(name_buffer, NAME_BUFF_SIZE, "conv_biases%d", i);
        op = add_const_op(tf_model, consts[(i << 1) + 1], consts_dims[(i << 1) + 1],
                          consts_dims_len[(i << 1) + 1], name_buffer);
        if (TF_GetCode(tf_model->status) != TF_OK || op == NULL){
            return NULL;
        }

        snprintf(name_buffer, NAME_BUFF_SIZE, "bias_add%d", i);
        op_desc = TF_NewOperation(tf_model->graph, "BiasAdd", name_buffer);
        input.oper = input_op;
        TF_AddInput(op_desc, input);
        input.oper = op;
        TF_AddInput(op_desc, input);
        TF_SetAttrType(op_desc, "T", TF_FLOAT);
        input_op = TF_FinishOperation(op_desc, tf_model->status);
        if (TF_GetCode(tf_model->status) != TF_OK){
            return NULL;
        }

        // activations[i] names the op type directly, e.g. "Relu"
        snprintf(name_buffer, NAME_BUFF_SIZE, "activation%d", i);
        op_desc = TF_NewOperation(tf_model->graph, activations[i], name_buffer);
        input.oper = input_op;
        TF_AddInput(op_desc, input);
        TF_SetAttrType(op_desc, "T", TF_FLOAT);
        input_op = TF_FinishOperation(op_desc, tf_model->status);
        if (TF_GetCode(tf_model->status) != TF_OK){
            return NULL;
        }
    }

    return input_op;
}
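add_const_op is called above but not defined in this excerpt. A sketch of what it plausibly looks like, following the inline Const construction in add_conv_layer; the signature is inferred from the call sites and is an assumption:

/* Hypothetical reconstruction: build a float Const op from a raw buffer. */
static TF_Operation *add_const_op(TFModel *tf_model, const float *values,
                                  const int64_t *dims, int dims_len,
                                  const char *name)
{
    int dim;
    TF_OperationDescription *op_desc;
    TF_Tensor *tensor;
    size_t len = sizeof(float);

    for (dim = 0; dim < dims_len; ++dim)
        len *= dims[dim]; // total byte size of the tensor

    op_desc = TF_NewOperation(tf_model->graph, "Const", name);
    TF_SetAttrType(op_desc, "dtype", TF_FLOAT);
    tensor = TF_AllocateTensor(TF_FLOAT, dims, dims_len, len);
    memcpy(TF_TensorData(tensor), values, len);
    TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
    TF_DeleteTensor(tensor); // the attribute keeps its own copy
    if (TF_GetCode(tf_model->status) != TF_OK)
        return NULL;
    return TF_FinishOperation(op_desc, tf_model->status);
}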