void CRFWithLossLayer<Dtype>::LayerSetUp( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // Read layer configuration from the CRF loss parameter message.
  state_num_ = this->layer_param_.crf_loss_param().state_num();
  feature_num_ = this->layer_param_.crf_loss_param().feature_num();
  nbest_ = this->layer_param_.crf_loss_param().nbest();
  max_seq_length_ = this->layer_param_.crf_loss_param().max_seq_length();
  for_training_ = this->layer_param_.crf_loss_param().for_training();
  // Batch size: number of sequences in bottom[0] (count over axis 0 only).
  num_ = bottom[0]->count(0, 1);

  // Learnable parameters: blobs_[0] start weights, blobs_[1] transition
  // weights, blobs_[2] state-feature weights. Skip allocation + filling when
  // they were already set up (e.g. restored from a snapshot).
  if (this->blobs_.size() > 0) {
    LOG(INFO) << "Skipping parameter initialization";
  } else {
    // This CRF layer always has these three kinds of parameter.
    this->blobs_.resize(3);
    // Start param: 1-D, one weight per state.
    vector<int> start_weight_shape(1);
    start_weight_shape[0] = state_num_;
    this->blobs_[0].reset(new Blob<Dtype>(start_weight_shape));
    // Transition param: fully connected state-to-state matrix
    // (maybe partially connected in the future), 2 dims.
    vector<int> trans_weight_shape(2);
    trans_weight_shape[0] = state_num_;
    trans_weight_shape[1] = state_num_;
    this->blobs_[1].reset(new Blob<Dtype>(trans_weight_shape));
    // State-feature param: one weight per (state, feature) pair of the
    // local context, 2 dims.
    vector<int> state_weight_shape(2);
    state_weight_shape[0] = state_num_;
    state_weight_shape[1] = feature_num_;
    this->blobs_[2].reset(new Blob<Dtype>(state_weight_shape));
    // For simplicity, all the weight fillers are the same (probably not so good).
    for (int i = 0; i < 3; ++i) {
      shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
          this->layer_param_.crf_loss_param().weight_filler()));
      weight_filler->Fill(this->blobs_[i].get());
    }
  }

  // BUG FIX: the buffer reshapes below previously lived inside the else
  // branch above, so when parameters were restored from a snapshot
  // (blobs_ already populated) none of the forward/backward work buffers
  // was ever shaped. They are unconditional now.

  // Forward (alpha) recursion table: one row of state scores per time step
  // per sequence.
  vector<int> alpha_shape(3);
  alpha_shape[0] = num_;
  alpha_shape[1] = max_seq_length_;
  alpha_shape[2] = state_num_;
  alpha_.Reshape(alpha_shape);
  // Backward (beta) recursion table.
  vector<int> beta_shape(4);
  beta_shape[0] = num_;
  beta_shape[1] = max_seq_length_;
  beta_shape[2] = state_num_;
  beta_shape[3] = 1;
  beta_.Reshape(beta_shape);
  // Per-state marginal (gamma) table.
  vector<int> gamma_shape(4);
  gamma_shape[0] = num_;
  gamma_shape[1] = max_seq_length_;
  gamma_shape[2] = state_num_;
  gamma_shape[3] = 1;
  gamma_.Reshape(gamma_shape);
  // Pairwise transition marginal (epsilon) table: state x state per step.
  vector<int> ep_shape(4);
  ep_shape[0] = num_;
  ep_shape[1] = max_seq_length_;
  ep_shape[2] = state_num_;
  ep_shape[3] = state_num_;
  epsilon_.Reshape(ep_shape);
  // Scratch buffer for the state-energy table of a single sequence.
  vector<int> se_shape(4);
  se_shape[0] = 1;
  se_shape[1] = max_seq_length_;
  se_shape[2] = state_num_;
  se_shape[3] = 1;
  buf_state_energy_.Reshape(se_shape);
  // Scratch buffer for the transposed bottom blob.
  // NOTE(review): this assumes bottom[0] has (at least) 4 axes — confirm
  // against the layer's Reshape/Forward expectations.
  vector<int> tr_shape = bottom[0]->shape();
  tr_shape[1] = max_seq_length_;
  tr_shape[2] = feature_num_;
  tr_shape[3] = 1;
  buf_bottom_transposed_.Reshape(tr_shape);
  // Scratch vector of length feature_num_.
  vector<int> buf_feat_shape(1);
  buf_feat_shape[0] = feature_num_;
  buf_feature_.Reshape(buf_feat_shape);
  // Scratch vector of length state_num_.
  vector<int> buf_2_shape(1);
  buf_2_shape[0] = state_num_;
  buf_state_.Reshape(buf_2_shape);
  // Multiplier vector over the sequence-length axis.
  vector<int> multi_shape(1);
  multi_shape[0] = max_seq_length_;
  multiplier_seq_len_.Reshape(multi_shape);
}
// Run `sql` through an SPI cursor, collect all (id, x, y) vertices it yields,
// and hand them to the CGAL-backed alpha_shape() routine.
//
// Parameters:
//   sql       - query text producing the input vertices.
//   res       - out: array of boundary vertices computed by alpha_shape().
//   res_count - out: number of entries in *res.
//
// Returns the value produced by finish(SPIcode, ret); -1 on early SPI setup
// failure. NOTE(review): finish() presumably closes the SPI connection and
// maps ret — confirm in its definition (not visible here).
static int compute_alpha_shape(char* sql, vertex_t **res, int *res_count) {
  int SPIcode;
  void *SPIplan;
  Portal SPIportal;
  bool moredata = TRUE;
  int ntuples;
  vertex_t *vertices = NULL;
  int total_tuples = 0;
  // Column indices start at -1 ("not yet resolved"); filled in on the first
  // fetched batch by fetch_vertices_columns().
  vertex_columns_t vertex_columns = {.id= -1, .x= -1, .y= -1};
  char *err_msg;
  int ret = -1;
  DBG("start alpha_shape\n");
  // Connect to the SPI manager; everything below runs inside this session.
  SPIcode = SPI_connect();
  if (SPIcode != SPI_OK_CONNECT) {
    elog(ERROR, "alpha_shape: couldn't open a connection to SPI");
    return -1;
  }
  SPIplan = SPI_prepare(sql, 0, NULL);
  if (SPIplan == NULL) {
    elog(ERROR, "alpha_shape: couldn't create query plan via SPI");
    return -1;
  }
  // Read-only cursor over the query result, fetched in TUPLIMIT-sized batches.
  if ((SPIportal = SPI_cursor_open(NULL, SPIplan, NULL, NULL, true)) == NULL) {
    elog(ERROR, "alpha_shape: SPI_cursor_open('%s') returns NULL", sql);
    return -1;
  }
  while (moredata == TRUE) {
    SPI_cursor_fetch(SPIportal, TRUE, TUPLIMIT);
    // Resolve column positions once, from the first batch's tuple descriptor.
    if (vertex_columns.id == -1) {
      if (fetch_vertices_columns(SPI_tuptable, &vertex_columns) == -1)
        return finish(SPIcode, ret);
    }
    ntuples = SPI_processed;
    total_tuples += ntuples;
    // Grow the vertex array to hold this batch as well.
    if (!vertices)
      vertices = palloc(total_tuples * sizeof(vertex_t));
    else
      vertices = repalloc(vertices, total_tuples * sizeof(vertex_t));
    if (vertices == NULL) {
      elog(ERROR, "Out of memory");
      return finish(SPIcode, ret);
    }
    if (ntuples > 0) {
      int t;
      SPITupleTable *tuptable = SPI_tuptable;
      TupleDesc tupdesc = SPI_tuptable->tupdesc;
      // Copy each tuple of the batch into its slot at the tail of `vertices`.
      for (t = 0; t < ntuples; t++) {
        HeapTuple tuple = tuptable->vals[t];
        fetch_vertex(&tuple, &tupdesc, &vertex_columns, &vertices[total_tuples - ntuples + t]);
      }
      SPI_freetuptable(tuptable);
    } else {
      // Empty fetch: cursor exhausted, leave the loop.
      moredata = FALSE;
    }
  }
  // if (total_tuples < 2) //this was the buggy code of the pgrouting project.
  // TODO: report this as a bug to the pgrouting project
  // the CGAL alpha-shape function crashes if called with less than three points!!!
  if (total_tuples == 0) {
    elog(ERROR, "Distance is too short. no vertex for alpha shape calculation. alpha shape calculation needs at least 3 vertices.");
  }
  if (total_tuples == 1) {
    elog(ERROR, "Distance is too short. only 1 vertex for alpha shape calculation. alpha shape calculation needs at least 3 vertices.");
  }
  if (total_tuples == 2) {
    elog(ERROR, "Distance is too short. only 2 vertices for alpha shape calculation. alpha shape calculation needs at least 3 vertices.");
  }
  // Defensive fall-through guard: elog(ERROR) above normally longjmps out,
  // so this is only reached if that behavior ever changes.
  if (total_tuples < 3) {
    // elog(ERROR, "Distance is too short ....");
    return finish(SPIcode, ret);
  }
  DBG("Calling CGAL alpha-shape\n");
  profstop("extract", prof_extract);
  profstart(prof_alpha);
  // Hand the collected points to the CGAL-backed computation; on success
  // *res/*res_count describe the boundary polygon vertices.
  ret = alpha_shape(vertices, total_tuples, res, res_count, &err_msg);
  profstop("alpha", prof_alpha);
  profstart(prof_store);
  if (ret < 0) {
    //elog(ERROR, "Error computing shape: %s", err_msg);
    ereport(ERROR, (errcode(ERRCODE_E_R_E_CONTAINING_SQL_NOT_PERMITTED), errmsg("Error computing shape: %s", err_msg)));
  }
  return finish(SPIcode, ret);
}

PG_FUNCTION_INFO_V1(alphashape);

// Set-returning SQL function: alphashape(sql text) -> setof record(x, y).
// First call runs the query and computes the shape; each subsequent call
// emits one boundary vertex via the standard SRF multi-call protocol.
Datum alphashape(PG_FUNCTION_ARGS) {
  FuncCallContext *funcctx;
  int call_cntr;
  int max_calls;
  TupleDesc tuple_desc;
  vertex_t *res = 0;
  /* stuff done only on the first call of the function */
  if (SRF_IS_FIRSTCALL()) {
    MemoryContext oldcontext;
    int res_count;
    int ret;
    // XXX profiling messages are not thread safe
    profstart(prof_total);
    profstart(prof_extract);
    /* create a function context for cross-call persistence */
    funcctx = SRF_FIRSTCALL_INIT();
    /* switch to memory context appropriate for multiple function calls */
    oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
    // Compute the whole result set up front; res/res_count survive across
    // calls via user_fctx. NOTE(review): ret is never inspected here —
    // failures inside compute_alpha_shape are expected to elog/ereport out.
    ret = compute_alpha_shape(text2char(PG_GETARG_TEXT_P(0)), &res, &res_count);
    /* total number of tuples to be returned */
    DBG("Conting tuples number\n");
    funcctx->max_calls = res_count;
    funcctx->user_fctx = res;
    DBG("Total count %i", res_count);
    if (get_call_result_type(fcinfo, NULL, &tuple_desc) != TYPEFUNC_COMPOSITE)
      ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("function returning record called in context " "that cannot accept type record")));
    funcctx->tuple_desc = BlessTupleDesc(tuple_desc);
    MemoryContextSwitchTo(oldcontext);
  }
  /* stuff done on every call of the function */
  DBG("Strange stuff doing\n");
  funcctx = SRF_PERCALL_SETUP();
  call_cntr = funcctx->call_cntr;
  max_calls = funcctx->max_calls;
  tuple_desc = funcctx->tuple_desc;
  res = (vertex_t*) funcctx->user_fctx;
  DBG("Trying to allocate some memory\n");
  if (call_cntr < max_calls) /* do when there is more left to send */
  {
    HeapTuple tuple;
    Datum result;
    Datum *values;
    char* nulls;
    /* This will work for some compilers. If it crashes with segfault, try to change the following block with this one

    values = palloc(3 * sizeof(Datum));
    nulls = palloc(3 * sizeof(char));

    values[0] = call_cntr;
    nulls[0] = ' ';
    values[1] = Float8GetDatum(res[call_cntr].x);
    nulls[1] = ' ';
    values[2] = Float8GetDatum(res[call_cntr].y);
    nulls[2] = ' ';
    */
    // Build the (x, y) output tuple; ' ' marks a non-null attribute in the
    // legacy heap_formtuple() nulls convention.
    values = palloc(2 * sizeof(Datum));
    nulls = palloc(2 * sizeof(char));
    values[0] = Float8GetDatum(res[call_cntr].x);
    nulls[0] = ' ';
    values[1] = Float8GetDatum(res[call_cntr].y);
    nulls[1] = ' ';
    DBG("Heap making\n");
    // NOTE(review): heap_formtuple() is deprecated and removed in newer
    // PostgreSQL versions in favor of heap_form_tuple() with a bool* nulls
    // array — confirm the target server version before upgrading.
    tuple = heap_formtuple(tuple_desc, values, nulls);
    DBG("Datum making\n");
    /* make the tuple into a datum */
    result = HeapTupleGetDatum(tuple);
    DBG("Trying to free some memory\n");
    /* clean up (this is not really necessary) */
    pfree(values);
    pfree(nulls);
    SRF_RETURN_NEXT(funcctx, result);
  }
  else /* do when there is no more left */
  {
    // NOTE(review): free() here assumes alpha_shape() allocated *res with
    // malloc, not palloc — confirm against its implementation; mixing the
    // two allocators would corrupt memory.
    if (res) free(res);
    profstop("store", prof_store);
    profstop("total", prof_total);
#ifdef PROFILE
    elog(NOTICE, "_________");
#endif
    SRF_RETURN_DONE(funcctx);
  }
}